/*-
 * Copyright (c) 2011, Aleksandr Rybalko
 * based on hard work
 * by Alexander Egorenkov <egorenar@gmail.com>
 * and by Damien Bergamini <damien.bergamini@free.fr>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/rt/if_rt.c 257341 2013-10-29 14:19:42Z nwhitehorn $");

#include "if_rtvar.h"
#include "if_rtreg.h"

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/cache.h>
#include <machine/cpufunc.h>
#include <machine/resource.h>
#include <vm/vm_param.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/pmap.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <mips/rt305x/rt305x_sysctlvar.h>
#include <mips/rt305x/rt305xreg.h>

#ifdef IF_RT_PHY_SUPPORT
#include "miibus_if.h"
#endif

/*
 * Defines and macros
 */
#define	RT_MAX_AGG_SIZE			3840

#define	RT_TX_DATA_SEG0_SIZE		MJUMPAGESIZE

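/*
 * RT_MS extracts bit field _f from _v; RT_SM inserts _v into field _f.
 * _f is the field mask and _f##_S its shift amount.
 */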
#define	RT_MS(_v, _f)			(((_v) & _f) >> _f##_S)
#define	RT_SM(_v, _f)			(((_v) << _f##_S) & _f)

#define	RT_TX_WATCHDOG_TIMEOUT		5

/*
 * Static function prototypes
 */
static int	rt_probe(device_t dev);
static int	rt_attach(device_t dev);
static int	rt_detach(device_t dev);
static int	rt_shutdown(device_t dev);
static int	rt_suspend(device_t dev);
static int	rt_resume(device_t dev);
static void	rt_init_locked(void *priv);
static void	rt_init(void *priv);
static void	rt_stop_locked(void *priv);
static void	rt_stop(void *priv);
static void	rt_start(struct ifnet *ifp);
static int	rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static void	rt_periodic(void *arg);
static void	rt_tx_watchdog(void *arg);
static void	rt_intr(void *arg);
static void	rt_tx_coherent_intr(struct rt_softc *sc);
static void	rt_rx_coherent_intr(struct rt_softc *sc);
static void	rt_rx_delay_intr(struct rt_softc *sc);
static void	rt_tx_delay_intr(struct rt_softc *sc);
static void	rt_rx_intr(struct rt_softc *sc);
static void	rt_tx_intr(struct rt_softc *sc, int qid);
static void	rt_rx_done_task(void *context, int pending);
static void	rt_tx_done_task(void *context, int pending);
static void	rt_periodic_task(void *context, int pending);
static int	rt_rx_eof(struct rt_softc *sc, int limit);
static void	rt_tx_eof(struct rt_softc *sc,
		    struct rt_softc_tx_ring *ring);
static void	rt_update_stats(struct rt_softc *sc);
static void	rt_watchdog(struct rt_softc *sc);
static void	rt_update_raw_counters(struct rt_softc *sc);
static void	rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask);
static void	rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask);
static int	rt_txrx_enable(struct rt_softc *sc);
static int	rt_alloc_rx_ring(struct rt_softc *sc,
		    struct rt_softc_rx_ring *ring);
static void	rt_reset_rx_ring(struct rt_softc *sc,
		    struct rt_softc_rx_ring *ring);
static void	rt_free_rx_ring(struct rt_softc *sc,
		    struct rt_softc_rx_ring *ring);
static int	rt_alloc_tx_ring(struct rt_softc *sc,
		    struct rt_softc_tx_ring *ring, int qid);
static void	rt_reset_tx_ring(struct rt_softc *sc,
		    struct rt_softc_tx_ring *ring);
static void	rt_free_tx_ring(struct rt_softc *sc,
		    struct rt_softc_tx_ring *ring);
static void	rt_dma_map_addr(void *arg, bus_dma_segment_t *segs,
		    int nseg, int error);
static void	rt_sysctl_attach(struct rt_softc *sc);
#ifdef IF_RT_PHY_SUPPORT
void		rt_miibus_statchg(device_t);
static int	rt_miibus_readreg(device_t, int, int);
static int	rt_miibus_writereg(device_t, int, int, int);
#endif
static int	rt_ifmedia_upd(struct ifnet *);
static void	rt_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static SYSCTL_NODE(_hw, OID_AUTO, rt, CTLFLAG_RD, 0, "RT driver parameters");
#ifdef IF_RT_DEBUG
static int rt_debug = 0;
SYSCTL_INT(_hw_rt, OID_AUTO, debug, CTLFLAG_RW, &rt_debug, 0,
    "RT debug level");
TUNABLE_INT("hw.rt.debug", &rt_debug);
#endif

static int
rt_probe(device_t dev)
{
	device_set_desc(dev, "Ralink RT305XF onChip Ethernet MAC");
	return (BUS_PROBE_NOWILDCARD);
}

/*
 * macaddr_atoi - translate string MAC address to uint8_t array
 */
static int
macaddr_atoi(const char *str, uint8_t *mac)
{
	int count, i;
	unsigned int amac[ETHER_ADDR_LEN];	/* Aligned version */

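	/* Each %x scans one octet; %*c consumes the separator character. */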
	count = sscanf(str, "%x%*c%x%*c%x%*c%x%*c%x%*c%x",
	    &amac[0], &amac[1], &amac[2],
	    &amac[3], &amac[4], &amac[5]);
	if (count < ETHER_ADDR_LEN) {
		memset(mac, 0, ETHER_ADDR_LEN);
		return (1);
	}

	/* Copy aligned to result */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mac[i] = (amac[i] & 0xff);

	return (0);
}

#ifdef USE_GENERATED_MAC_ADDRESS
static char *
kernenv_next(char *cp)
{

	if (cp != NULL) {
		while (*cp != 0)
			cp++;
		cp++;
		if (*cp == 0)
			cp = NULL;
	}
	return (cp);
}

/*
 * generate_mac(uint8_t *mac)
 * MAC address generator for cases when the real device MAC address is
 * unknown or not yet accessible.
 * Uses the 'b','s','d' signature and 3 octets from a CRC32 over the kenv.
 * MAC = 'b', 's', 'd', CRC[3]^CRC[2], CRC[1], CRC[0]
 *
 * Output: a MAC address that does not change between reboots, as long as
 * the hints and bootloader info are unchanged.
 */
static void
generate_mac(uint8_t *mac)
{
	unsigned char *cp;
	int i = 0;
	uint32_t crc = 0xffffffff;

	/* Generate CRC32 on kenv */
	if (dynamic_kenv) {
		for (cp = kenvp[0]; cp != NULL; cp = kenvp[++i]) {
			crc = calculate_crc32c(crc, cp, strlen(cp) + 1);
		}
	} else {
		for (cp = kern_envp; cp != NULL; cp = kernenv_next(cp)) {
			crc = calculate_crc32c(crc, cp, strlen(cp) + 1);
		}
	}
	crc = ~crc;

	mac[0] = 'b';
	mac[1] = 's';
	mac[2] = 'd';
	mac[3] = (crc >> 24) ^ ((crc >> 16) & 0xff);
	mac[4] = (crc >> 8) & 0xff;
	mac[5] = crc & 0xff;
}
#endif

/*
 * ether_request_mac - try to find a usable MAC address.
 */
static int
ether_request_mac(device_t dev, uint8_t *mac)
{
	char *var;

	/*
	 * "ethaddr" is passed via envp on RedBoot platforms
	 * "kmac" is passed via argv on RouterBOOT platforms
	 */
#if defined(__U_BOOT__) || defined(__REDBOOT__) || defined(__ROUTERBOOT__)
	if ((var = getenv("ethaddr")) != NULL ||
	    (var = getenv("kmac")) != NULL) {

		if (!macaddr_atoi(var, mac)) {
			printf("%s: use %s macaddr from KENV\n",
			    device_get_nameunit(dev), var);
			freeenv(var);
			return (0);
		}
		freeenv(var);
	}
#endif

	/*
	 * Try from hints
	 * hint.[dev].[unit].macaddr
	 */
	if (!resource_string_value(device_get_name(dev),
	    device_get_unit(dev), "macaddr", (const char **)&var)) {

		if (!macaddr_atoi(var, mac)) {
			printf("%s: use %s macaddr from hints\n",
			    device_get_nameunit(dev), var);
			return (0);
		}
	}

#ifdef USE_GENERATED_MAC_ADDRESS
	generate_mac(mac);

	device_printf(dev, "use generated %02x:%02x:%02x:%02x:%02x:%02x "
	    "macaddr\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
#else
	/* Hardcoded */
	mac[0] = 0x00;
	mac[1] = 0x18;
	mac[2] = 0xe7;
	mac[3] = 0xd5;
	mac[4] = 0x83;
	mac[5] = 0x90;

	device_printf(dev, "use hardcoded 00:18:e7:d5:83:90 macaddr\n");
#endif

	return (0);
}

static int
rt_attach(device_t dev)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	int error, i;

	sc = device_get_softc(dev);
	sc->dev = dev;

	mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF | MTX_RECURSE);

	sc->mem_rid = 0;
	sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
	    RF_ACTIVE);
	if (sc->mem == NULL) {
		device_printf(dev, "could not allocate memory resource\n");
		error = ENXIO;
		goto fail;
	}

	sc->bst = rman_get_bustag(sc->mem);
	sc->bsh = rman_get_bushandle(sc->mem);

	sc->irq_rid = 0;
	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
	    RF_ACTIVE);
	if (sc->irq == NULL) {
		device_printf(dev,
		    "could not allocate interrupt resource\n");
		error = ENXIO;
		goto fail;
	}

#ifdef IF_RT_DEBUG
	sc->debug = rt_debug;

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
		"debug", CTLFLAG_RW, &sc->debug, 0, "rt debug level");
#endif

	device_printf(dev, "RT305XF Ethernet MAC (rev 0x%08x)\n",
	    sc->mac_rev);

	/* Reset hardware */
	RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);

	RT_WRITE(sc, GDMA1_BASE + GDMA_FWD_CFG,
	    (
	    GDM_ICS_EN | /* Enable IP Csum */
	    GDM_TCS_EN | /* Enable TCP Csum */
	    GDM_UCS_EN | /* Enable UDP Csum */
	    GDM_STRPCRC | /* Strip CRC from packet */
	    GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* Forward UCast to CPU */
	    GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* Forward BCast to CPU */
	    GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* Forward MCast to CPU */
	    GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* Forward Other to CPU */
	    ));

	/* allocate Tx and Rx rings */
	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
		error = rt_alloc_tx_ring(sc, &sc->tx_ring[i], i);
		if (error != 0) {
			device_printf(dev, "could not allocate Tx ring #%d\n",
			    i);
			goto fail;
		}
	}

	sc->tx_ring_mgtqid = 5;

	error = rt_alloc_rx_ring(sc, &sc->rx_ring);
	if (error != 0) {
		device_printf(dev, "could not allocate Rx ring\n");
		goto fail;
	}

	callout_init(&sc->periodic_ch, 0);
	callout_init_mtx(&sc->tx_watchdog_ch, &sc->lock, 0);

	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "could not if_alloc()\n");
		error = ENOMEM;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = rt_init;
	ifp->if_ioctl = rt_ioctl;
	ifp->if_start = rt_start;
#define	RT_TX_QLEN	256

	IFQ_SET_MAXLEN(&ifp->if_snd, RT_TX_QLEN);
	ifp->if_snd.ifq_drv_maxlen = RT_TX_QLEN;
	IFQ_SET_READY(&ifp->if_snd);

#ifdef IF_RT_PHY_SUPPORT
	error = mii_attach(dev, &sc->rt_miibus, ifp, rt_ifmedia_upd,
	    rt_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		error = ENXIO;
		goto fail;
	}
#else
	ifmedia_init(&sc->rt_ifmedia, 0, rt_ifmedia_upd, rt_ifmedia_sts);
	ifmedia_add(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0,
	    NULL);
	ifmedia_set(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX);

#endif /* IF_RT_PHY_SUPPORT */

	ether_request_mac(dev, sc->mac_addr);
	ether_ifattach(ifp, sc->mac_addr);

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU;
	ifp->if_capabilities |= IFCAP_RXCSUM|IFCAP_TXCSUM;
	ifp->if_capenable |= IFCAP_RXCSUM|IFCAP_TXCSUM;

	/* init task queue */
	TASK_INIT(&sc->rx_done_task, 0, rt_rx_done_task, sc);
	TASK_INIT(&sc->tx_done_task, 0, rt_tx_done_task, sc);
	TASK_INIT(&sc->periodic_task, 0, rt_periodic_task, sc);

	sc->rx_process_limit = 100;

	sc->taskqueue = taskqueue_create("rt_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->taskqueue);

	taskqueue_start_threads(&sc->taskqueue, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->dev));

	rt_sysctl_attach(sc);

	/* set up interrupt */
	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, rt_intr, sc, &sc->irqh);
	if (error != 0) {
		printf("%s: could not set up interrupt\n",
			device_get_nameunit(dev));
		goto fail;
	}
#ifdef IF_RT_DEBUG
	device_printf(dev, "debug var at %#08x\n", (u_int)&(sc->debug));
#endif

	return (0);

fail:
	/* free Tx and Rx rings */
	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
		rt_free_tx_ring(sc, &sc->tx_ring[i]);

	rt_free_rx_ring(sc, &sc->rx_ring);

	mtx_destroy(&sc->lock);

	if (sc->mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
		    sc->mem);

	if (sc->irq != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid,
		    sc->irq);

	return (error);
}
469
470/*
471 * Set media options.
472 */
473static int
474rt_ifmedia_upd(struct ifnet *ifp)
475{
476	struct rt_softc *sc;
477#ifdef IF_RT_PHY_SUPPORT
478	struct mii_data *mii;
479	struct mii_softc *miisc;
480	int error = 0;
481
482	sc = ifp->if_softc;
483	RT_SOFTC_LOCK(sc);
484
485	mii = device_get_softc(sc->rt_miibus);
486	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
487		PHY_RESET(miisc);
488	error = mii_mediachg(mii);
489	RT_SOFTC_UNLOCK(sc);
490
491	return (error);
492
493#else /* !IF_RT_PHY_SUPPORT */
494
495	struct ifmedia *ifm;
496	struct ifmedia_entry *ife;
497
498	sc = ifp->if_softc;
499	ifm = &sc->rt_ifmedia;
500	ife = ifm->ifm_cur;
501
502	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
503		return (EINVAL);
504
505	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
506		device_printf(sc->dev,
507		    "AUTO is not supported for multiphy MAC");
508		return (EINVAL);
509	}
510
511	/*
512	 * Ignore everything
513	 */
514	return (0);
515#endif /* IF_RT_PHY_SUPPORT */
516}
517
518/*
519 * Report current media status.
520 */
521static void
522rt_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
523{
524#ifdef IF_RT_PHY_SUPPORT
525	struct rt_softc *sc;
526	struct mii_data *mii;
527
528	sc = ifp->if_softc;
529
530	RT_SOFTC_LOCK(sc);
531	mii = device_get_softc(sc->rt_miibus);
532	mii_pollstat(mii);
533	ifmr->ifm_active = mii->mii_media_active;
534	ifmr->ifm_status = mii->mii_media_status;
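	/* XXX: override the PHY status just read: force 100BaseTX FDX */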
	ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	RT_SOFTC_UNLOCK(sc);
#else /* !IF_RT_PHY_SUPPORT */

	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
#endif /* IF_RT_PHY_SUPPORT */
}

static int
rt_detach(device_t dev)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	int i;

	sc = device_get_softc(dev);
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_ANY, "detaching\n");

	RT_SOFTC_LOCK(sc);

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	callout_stop(&sc->periodic_ch);
	callout_stop(&sc->tx_watchdog_ch);

	taskqueue_drain(sc->taskqueue, &sc->rx_done_task);
	taskqueue_drain(sc->taskqueue, &sc->tx_done_task);
	taskqueue_drain(sc->taskqueue, &sc->periodic_task);

	/* free Tx and Rx rings */
	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
		rt_free_tx_ring(sc, &sc->tx_ring[i]);

	rt_free_rx_ring(sc, &sc->rx_ring);

	RT_SOFTC_UNLOCK(sc);

#ifdef IF_RT_PHY_SUPPORT
	if (sc->rt_miibus != NULL)
		device_delete_child(dev, sc->rt_miibus);
#endif

	ether_ifdetach(ifp);
	if_free(ifp);

	taskqueue_free(sc->taskqueue);

	mtx_destroy(&sc->lock);

	bus_generic_detach(dev);
	bus_teardown_intr(dev, sc->irq, sc->irqh);
	bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
	bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem);

	return (0);
}

static int
rt_shutdown(device_t dev)
{
	struct rt_softc *sc;

	sc = device_get_softc(dev);
	RT_DPRINTF(sc, RT_DEBUG_ANY, "shutting down\n");
	rt_stop(sc);

	return (0);
}

static int
rt_suspend(device_t dev)
{
	struct rt_softc *sc;

	sc = device_get_softc(dev);
	RT_DPRINTF(sc, RT_DEBUG_ANY, "suspending\n");
	rt_stop(sc);

	return (0);
}

static int
rt_resume(device_t dev)
{
	struct rt_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_ANY, "resuming\n");

	if (ifp->if_flags & IFF_UP)
		rt_init(sc);

	return (0);
}

/*
 * rt_init_locked - run the initialization process with the mutex held.
 */
static void
rt_init_locked(void *priv)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
#ifdef IF_RT_PHY_SUPPORT
	struct mii_data *mii;
#endif
	int i, ntries;
	uint32_t tmp;

	sc = priv;
	ifp = sc->ifp;
#ifdef IF_RT_PHY_SUPPORT
	mii = device_get_softc(sc->rt_miibus);
#endif

	RT_DPRINTF(sc, RT_DEBUG_ANY, "initializing\n");

	RT_SOFTC_ASSERT_LOCKED(sc);

	/* hardware reset */
	RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);
	rt305x_sysctl_set(SYSCTL_RSTCTRL, SYSCTL_RSTCTRL_FRENG);

	/* Fwd to CPU (uni|broad|multi)cast and Unknown */
	RT_WRITE(sc, GDMA1_BASE + GDMA_FWD_CFG,
	    (
	    GDM_ICS_EN | /* Enable IP Csum */
	    GDM_TCS_EN | /* Enable TCP Csum */
	    GDM_UCS_EN | /* Enable UDP Csum */
	    GDM_STRPCRC | /* Strip CRC from packet */
	    GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* Forward UCast to CPU */
	    GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* Forward BCast to CPU */
	    GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* Forward MCast to CPU */
	    GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* Forward Other to CPU */
	    ));

	/* disable DMA engine */
	RT_WRITE(sc, PDMA_BASE + PDMA_GLO_CFG, 0);
	RT_WRITE(sc, PDMA_BASE + PDMA_RST_IDX, 0xffffffff);

	/* wait while DMA engine is busy */
	for (ntries = 0; ntries < 100; ntries++) {
		tmp = RT_READ(sc, PDMA_BASE + PDMA_GLO_CFG);
		if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)))
			break;
		DELAY(1000);
	}

	if (ntries == 100) {
		device_printf(sc->dev, "timeout waiting for DMA engine\n");
		goto fail;
	}

	/* reset Rx and Tx rings */
	tmp = FE_RST_DRX_IDX0 |
		FE_RST_DTX_IDX3 |
		FE_RST_DTX_IDX2 |
		FE_RST_DTX_IDX1 |
		FE_RST_DTX_IDX0;

	RT_WRITE(sc, PDMA_BASE + PDMA_RST_IDX, tmp);

	/* XXX switch set mac address */
	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
		rt_reset_tx_ring(sc, &sc->tx_ring[i]);

	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
		/* update TX_BASE_PTRx */
		RT_WRITE(sc, PDMA_BASE + TX_BASE_PTR(i),
			sc->tx_ring[i].desc_phys_addr);
		RT_WRITE(sc, PDMA_BASE + TX_MAX_CNT(i),
			RT_SOFTC_TX_RING_DESC_COUNT);
		RT_WRITE(sc, PDMA_BASE + TX_CTX_IDX(i), 0);
	}

	/* init Rx ring */
	rt_reset_rx_ring(sc, &sc->rx_ring);

	/* update RX_BASE_PTR0 */
	RT_WRITE(sc, PDMA_BASE + RX_BASE_PTR0,
		sc->rx_ring.desc_phys_addr);
	RT_WRITE(sc, PDMA_BASE + RX_MAX_CNT0,
		RT_SOFTC_RX_RING_DATA_COUNT);
	RT_WRITE(sc, PDMA_BASE + RX_CALC_IDX0,
		RT_SOFTC_RX_RING_DATA_COUNT - 1);

	/* write back DDONE, 16byte burst enable RX/TX DMA */
	RT_WRITE(sc, PDMA_BASE + PDMA_GLO_CFG,
	    FE_TX_WB_DDONE | FE_DMA_BT_SIZE16 | FE_RX_DMA_EN | FE_TX_DMA_EN);

	/* disable interrupt mitigation */
	RT_WRITE(sc, PDMA_BASE + DELAY_INT_CFG, 0);

	/* clear pending interrupts */
	RT_WRITE(sc, GE_PORT_BASE + FE_INT_STATUS, 0xffffffff);

	/* enable interrupts */
	tmp = CNT_PPE_AF |
		CNT_GDM_AF |
		PSE_P2_FC |
		GDM_CRC_DROP |
		PSE_BUF_DROP |
		GDM_OTHER_DROP |
		PSE_P1_FC |
		PSE_P0_FC |
		PSE_FQ_EMPTY |
		INT_TX_COHERENT |
		INT_RX_COHERENT |
		INT_TXQ3_DONE |
		INT_TXQ2_DONE |
		INT_TXQ1_DONE |
		INT_TXQ0_DONE |
		INT_RX_DONE;

	sc->intr_enable_mask = tmp;

	RT_WRITE(sc, GE_PORT_BASE + FE_INT_ENABLE, tmp);

	if (rt_txrx_enable(sc) != 0)
		goto fail;

#ifdef IF_RT_PHY_SUPPORT
	if (mii != NULL)
		mii_mediachg(mii);
#endif /* IF_RT_PHY_SUPPORT */

	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	sc->periodic_round = 0;

	callout_reset(&sc->periodic_ch, hz / 10, rt_periodic, sc);

	return;

fail:
	rt_stop_locked(sc);
}

/*
 * rt_init - lock and initialize device.
 */
static void
rt_init(void *priv)
{
	struct rt_softc *sc;

	sc = priv;
	RT_SOFTC_LOCK(sc);
	rt_init_locked(sc);
	RT_SOFTC_UNLOCK(sc);
}

/*
 * rt_stop_locked - stop TX/RX with the lock held
 */
static void
rt_stop_locked(void *priv)
{
	struct rt_softc *sc;
	struct ifnet *ifp;

	sc = priv;
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_ANY, "stopping\n");

	RT_SOFTC_ASSERT_LOCKED(sc);
	sc->tx_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	callout_stop(&sc->periodic_ch);
	callout_stop(&sc->tx_watchdog_ch);
	RT_SOFTC_UNLOCK(sc);
	taskqueue_block(sc->taskqueue);

	/*
	 * Sometimes rt_stop_locked is called from the ISR and we panic;
	 * enable the drains below once the cause is found.
	 */
#ifdef notyet
	taskqueue_drain(sc->taskqueue, &sc->rx_done_task);
	taskqueue_drain(sc->taskqueue, &sc->tx_done_task);
	taskqueue_drain(sc->taskqueue, &sc->periodic_task);
#endif
	RT_SOFTC_LOCK(sc);

	/* disable interrupts */
	RT_WRITE(sc, GE_PORT_BASE + FE_INT_ENABLE, 0);

	/* reset adapter */
	RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);

	RT_WRITE(sc, GDMA1_BASE + GDMA_FWD_CFG,
	    (
	    GDM_ICS_EN | /* Enable IP Csum */
	    GDM_TCS_EN | /* Enable TCP Csum */
	    GDM_UCS_EN | /* Enable UDP Csum */
	    GDM_STRPCRC | /* Strip CRC from packet */
	    GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* Forward UCast to CPU */
	    GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* Forward BCast to CPU */
	    GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* Forward MCast to CPU */
	    GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* Forward Other to CPU */
	    ));
}

static void
rt_stop(void *priv)
{
	struct rt_softc *sc;

	sc = priv;
	RT_SOFTC_LOCK(sc);
	rt_stop_locked(sc);
	RT_SOFTC_UNLOCK(sc);
}

/*
 * rt_tx_data - transmit packet.
 */
static int
rt_tx_data(struct rt_softc *sc, struct mbuf *m, int qid)
{
	struct ifnet *ifp;
	struct rt_softc_tx_ring *ring;
	struct rt_softc_tx_data *data;
	struct rt_txdesc *desc;
	struct mbuf *m_d;
	bus_dma_segment_t dma_seg[RT_SOFTC_MAX_SCATTER];
	int error, ndmasegs, ndescs, i;

	KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT,
		("%s: Tx data: invalid qid=%d\n",
		 device_get_nameunit(sc->dev), qid));

	RT_SOFTC_TX_RING_ASSERT_LOCKED(&sc->tx_ring[qid]);

	ifp = sc->ifp;
	ring = &sc->tx_ring[qid];
	desc = &ring->desc[ring->desc_cur];
	data = &ring->data[ring->data_cur];

	error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag, data->dma_map, m,
	    dma_seg, &ndmasegs, 0);
	if (error != 0) {
		/* too many fragments, linearize */

		RT_DPRINTF(sc, RT_DEBUG_TX,
			"could not load mbuf DMA map, trying to linearize "
			"mbuf: ndmasegs=%d, len=%d, error=%d\n",
			ndmasegs, m->m_pkthdr.len, error);

		m_d = m_collapse(m, M_NOWAIT, 16);
		if (m_d == NULL) {
			m_freem(m);
			m = NULL;
			return (ENOMEM);
		}
		m = m_d;

		sc->tx_defrag_packets++;

		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
		    data->dma_map, m, dma_seg, &ndmasegs, 0);
		if (error != 0) {
			device_printf(sc->dev, "could not load mbuf DMA map: "
			    "ndmasegs=%d, len=%d, error=%d\n",
			    ndmasegs, m->m_pkthdr.len, error);
			m_freem(m);
			return (error);
		}
	}

	if (m->m_pkthdr.len == 0)
		ndmasegs = 0;

	/* determine how many Tx descs are required */
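	/* each Tx descriptor carries up to two DMA segments (sdp0/sdp1) */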
	ndescs = 1 + ndmasegs / 2;
	if ((ring->desc_queued + ndescs) >
	    (RT_SOFTC_TX_RING_DESC_COUNT - 2)) {
		RT_DPRINTF(sc, RT_DEBUG_TX,
		    "there are not enough Tx descs\n");

		sc->no_tx_desc_avail++;

		bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
		m_freem(m);
		return (EFBIG);
	}

	data->m = m;

	/* set up Tx descs */
	for (i = 0; i < ndmasegs; i += 2) {
		/* Set destination */
		desc->dst = (TXDSCR_DST_PORT_GDMA1);
		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
			desc->dst |= (TXDSCR_IP_CSUM_GEN|TXDSCR_UDP_CSUM_GEN|
			    TXDSCR_TCP_CSUM_GEN);
		/* Set queue id */
		desc->qn = qid;
		/* No PPPoE */
		desc->pppoe = 0;
		/* No VLAN */
		desc->vid = 0;

		desc->sdp0 = htole32(dma_seg[i].ds_addr);
		desc->sdl0 = htole16(dma_seg[i].ds_len |
		    (((i+1) == ndmasegs) ? RT_TXDESC_SDL0_LASTSEG : 0));

		if ((i+1) < ndmasegs) {
			desc->sdp1 = htole32(dma_seg[i+1].ds_addr);
			desc->sdl1 = htole16(dma_seg[i+1].ds_len |
			    (((i+2) == ndmasegs) ? RT_TXDESC_SDL1_LASTSEG : 0));
		} else {
			desc->sdp1 = 0;
			desc->sdl1 = 0;
		}

		if ((i+2) < ndmasegs) {
			ring->desc_queued++;
			ring->desc_cur = (ring->desc_cur + 1) %
			    RT_SOFTC_TX_RING_DESC_COUNT;
		}
		desc = &ring->desc[ring->desc_cur];
	}

	RT_DPRINTF(sc, RT_DEBUG_TX, "sending data: len=%d, ndmasegs=%d, "
	    "DMA ds_len=%d/%d/%d/%d/%d\n",
	    m->m_pkthdr.len, ndmasegs,
	    (int) dma_seg[0].ds_len,
	    (int) dma_seg[1].ds_len,
	    (int) dma_seg[2].ds_len,
	    (int) dma_seg[3].ds_len,
	    (int) dma_seg[4].ds_len);

	bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
		BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
		BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
		BUS_DMASYNC_PREWRITE);

	ring->desc_queued++;
	ring->desc_cur = (ring->desc_cur + 1) % RT_SOFTC_TX_RING_DESC_COUNT;

	ring->data_queued++;
	ring->data_cur = (ring->data_cur + 1) % RT_SOFTC_TX_RING_DATA_COUNT;

	/* kick Tx */
	RT_WRITE(sc, PDMA_BASE + TX_CTX_IDX(qid), ring->desc_cur);

	return (0);
}

/*
 * rt_start - start Transmit/Receive
 */
static void
rt_start(struct ifnet *ifp)
{
	struct rt_softc *sc;
	struct mbuf *m;
	int qid = 0 /* XXX must check QoS priority */;

	sc = ifp->if_softc;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	for (;;) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		m->m_pkthdr.rcvif = NULL;

		RT_SOFTC_TX_RING_LOCK(&sc->tx_ring[qid]);

		if (sc->tx_ring[qid].data_queued >=
		    RT_SOFTC_TX_RING_DATA_COUNT) {
			RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);

			RT_DPRINTF(sc, RT_DEBUG_TX,
			    "if_start: Tx ring with qid=%d is full\n", qid);

			m_freem(m);

			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			ifp->if_oerrors++;

			sc->tx_data_queue_full[qid]++;

			break;
		}

		if (rt_tx_data(sc, m, qid) != 0) {
			RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);

			ifp->if_oerrors++;

			break;
		}

		RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);
		sc->tx_timer = RT_TX_WATCHDOG_TIMEOUT;
		callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc);
	}
}

/*
 * rt_update_promisc - set/clear promiscuous mode. Not used yet, because
 * filtering is done by the attached Ethernet switch.
 */
static void
rt_update_promisc(struct ifnet *ifp)
{
	struct rt_softc *sc;

	sc = ifp->if_softc;
	printf("%s: %s promiscuous mode\n",
		device_get_nameunit(sc->dev),
		(ifp->if_flags & IFF_PROMISC) ? "entering" : "leaving");
}

/*
 * rt_ioctl - ioctl handler.
 */
static int
rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct rt_softc *sc;
	struct ifreq *ifr;
#ifdef IF_RT_PHY_SUPPORT
	struct mii_data *mii;
#endif /* IF_RT_PHY_SUPPORT */
	int error, startall;

	sc = ifp->if_softc;
	ifr = (struct ifreq *) data;

	error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		startall = 0;
		RT_SOFTC_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    IFF_PROMISC)
					rt_update_promisc(ifp);
			} else {
				rt_init_locked(sc);
				startall = 1;
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rt_stop_locked(sc);
		}
		sc->if_flags = ifp->if_flags;
		RT_SOFTC_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
#ifdef IF_RT_PHY_SUPPORT
		mii = device_get_softc(sc->rt_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
#else
		error = ifmedia_ioctl(ifp, ifr, &sc->rt_ifmedia, cmd);
#endif /* IF_RT_PHY_SUPPORT */
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

/*
 * rt_periodic - Handler of PERIODIC interrupt
 */
static void
rt_periodic(void *arg)
{
	struct rt_softc *sc;

	sc = arg;
	RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic\n");
	taskqueue_enqueue(sc->taskqueue, &sc->periodic_task);
}

/*
 * rt_tx_watchdog - Handler of TX Watchdog
 */
static void
rt_tx_watchdog(void *arg)
{
	struct rt_softc *sc;
	struct ifnet *ifp;

	sc = arg;
	ifp = sc->ifp;

	if (sc->tx_timer == 0)
		return;

	if (--sc->tx_timer == 0) {
		device_printf(sc->dev, "Tx watchdog timeout: resetting\n");
#ifdef notyet
		/*
		 * XXX: Commented out, because the reset breaks input.
		 */
		rt_stop_locked(sc);
		rt_init_locked(sc);
#endif
		ifp->if_oerrors++;
		sc->tx_watchdog_timeouts++;
	}
	callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc);
}

/*
 * rt_cnt_ppe_af - Handler of PPE Counter Table Almost Full interrupt
 */
static void
rt_cnt_ppe_af(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR, "PPE Counter Table Almost Full\n");
}

/*
 * rt_cnt_gdm_af - Handler of GDMA 1 & 2 Counter Table Almost Full interrupt
 */
static void
rt_cnt_gdm_af(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "GDMA 1 & 2 Counter Table Almost Full\n");
}

/*
 * rt_pse_p2_fc - Handler of PSE port2 (GDMA 2) flow control interrupt
 */
static void
rt_pse_p2_fc(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE port2 (GDMA 2) flow control asserted.\n");
}

/*
 * rt_gdm_crc_drop - Handler of GDMA 1/2 discard a packet due to CRC error
 * interrupt
 */
static void
rt_gdm_crc_drop(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "GDMA 1 & 2 discard a packet due to CRC error\n");
}

/*
 * rt_pse_buf_drop - Handler of buffer sharing limitation interrupt
 */
static void
rt_pse_buf_drop(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE discards a packet due to buffer sharing limitation\n");
}

/*
 * rt_gdm_other_drop - Handler of discard on other reason interrupt
 */
static void
rt_gdm_other_drop(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "GDMA 1 & 2 discard a packet due to other reason\n");
}

/*
 * rt_pse_p1_fc - Handler of PSE port1 (GDMA 1) flow control interrupt
 */
static void
rt_pse_p1_fc(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE port1 (GDMA 1) flow control asserted.\n");
}

/*
 * rt_pse_p0_fc - Handler of PSE port0 (CDMA) flow control interrupt
 */
static void
rt_pse_p0_fc(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE port0 (CDMA) flow control asserted.\n");
}

/*
 * rt_pse_fq_empty - Handler of PSE free Q empty threshold reached interrupt
 */
static void
rt_pse_fq_empty(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR,
	    "PSE free Q empty threshold reached & forced drop "
		    "condition occurred.\n");
}

/*
 * rt_intr - main ISR
 */
static void
rt_intr(void *arg)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	uint32_t status;

	sc = arg;
	ifp = sc->ifp;

	/* acknowledge interrupts */
	status = RT_READ(sc, GE_PORT_BASE + FE_INT_STATUS);
	RT_WRITE(sc, GE_PORT_BASE + FE_INT_STATUS, status);

	RT_DPRINTF(sc, RT_DEBUG_INTR, "interrupt: status=0x%08x\n", status);

	if (status == 0xffffffff ||	/* device likely went away */
		status == 0)		/* not for us */
		return;

	sc->interrupts++;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	if (status & CNT_PPE_AF)
		rt_cnt_ppe_af(sc);

	if (status & CNT_GDM_AF)
		rt_cnt_gdm_af(sc);

	if (status & PSE_P2_FC)
		rt_pse_p2_fc(sc);

	if (status & GDM_CRC_DROP)
		rt_gdm_crc_drop(sc);

	if (status & PSE_BUF_DROP)
		rt_pse_buf_drop(sc);

	if (status & GDM_OTHER_DROP)
		rt_gdm_other_drop(sc);

	if (status & PSE_P1_FC)
		rt_pse_p1_fc(sc);

	if (status & PSE_P0_FC)
		rt_pse_p0_fc(sc);

	if (status & PSE_FQ_EMPTY)
		rt_pse_fq_empty(sc);

	if (status & INT_TX_COHERENT)
		rt_tx_coherent_intr(sc);

	if (status & INT_RX_COHERENT)
		rt_rx_coherent_intr(sc);

	if (status & RX_DLY_INT)
		rt_rx_delay_intr(sc);

	if (status & TX_DLY_INT)
		rt_tx_delay_intr(sc);

	if (status & INT_RX_DONE)
		rt_rx_intr(sc);

	if (status & INT_TXQ3_DONE)
		rt_tx_intr(sc, 3);

	if (status & INT_TXQ2_DONE)
		rt_tx_intr(sc, 2);

	if (status & INT_TXQ1_DONE)
		rt_tx_intr(sc, 1);

	if (status & INT_TXQ0_DONE)
		rt_tx_intr(sc, 0);
}

static void
rt_tx_coherent_intr(struct rt_softc *sc)
{
	uint32_t tmp;
	int i;

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx coherent interrupt\n");

	sc->tx_coherent_interrupts++;

	/* restart DMA engine */
	tmp = RT_READ(sc, PDMA_BASE + PDMA_GLO_CFG);
	tmp &= ~(FE_TX_WB_DDONE | FE_TX_DMA_EN);
	RT_WRITE(sc, PDMA_BASE + PDMA_GLO_CFG, tmp);

	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
		rt_reset_tx_ring(sc, &sc->tx_ring[i]);

	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
		RT_WRITE(sc, PDMA_BASE + TX_BASE_PTR(i),
			sc->tx_ring[i].desc_phys_addr);
		RT_WRITE(sc, PDMA_BASE + TX_MAX_CNT(i),
			RT_SOFTC_TX_RING_DESC_COUNT);
		RT_WRITE(sc, PDMA_BASE + TX_CTX_IDX(i), 0);
	}

	rt_txrx_enable(sc);
}

/*
 * rt_rx_coherent_intr
 */
static void
rt_rx_coherent_intr(struct rt_softc *sc)
{
	uint32_t tmp;

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx coherent interrupt\n");

	sc->rx_coherent_interrupts++;

	/* restart DMA engine */
	tmp = RT_READ(sc, PDMA_BASE + PDMA_GLO_CFG);
	tmp &= ~(FE_RX_DMA_EN);
	RT_WRITE(sc, PDMA_BASE + PDMA_GLO_CFG, tmp);

	/* init Rx ring */
	rt_reset_rx_ring(sc, &sc->rx_ring);
	RT_WRITE(sc, PDMA_BASE + RX_BASE_PTR0,
		sc->rx_ring.desc_phys_addr);
	RT_WRITE(sc, PDMA_BASE + RX_MAX_CNT0,
		RT_SOFTC_RX_RING_DATA_COUNT);
	RT_WRITE(sc, PDMA_BASE + RX_CALC_IDX0,
		RT_SOFTC_RX_RING_DATA_COUNT - 1);

	rt_txrx_enable(sc);
}

/*
 * rt_rx_intr - a packet received
 */
static void
rt_rx_intr(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx interrupt\n");
	sc->rx_interrupts++;
	RT_SOFTC_LOCK(sc);

	if (!(sc->intr_disable_mask & INT_RX_DONE)) {
		rt_intr_disable(sc, INT_RX_DONE);
		taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task);
	}

	sc->intr_pending_mask |= INT_RX_DONE;
	RT_SOFTC_UNLOCK(sc);
}

static void
rt_rx_delay_intr(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx delay interrupt\n");
	sc->rx_delay_interrupts++;
}

static void
rt_tx_delay_intr(struct rt_softc *sc)
{

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx delay interrupt\n");
	sc->tx_delay_interrupts++;
}

/*
 * rt_tx_intr - transmission of a packet is done
 */
static void
rt_tx_intr(struct rt_softc *sc, int qid)
{

	KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT,
		("%s: Tx interrupt: invalid qid=%d\n",
		 device_get_nameunit(sc->dev), qid));

	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx interrupt: qid=%d\n", qid);

	sc->tx_interrupts[qid]++;
	RT_SOFTC_LOCK(sc);

	if (!(sc->intr_disable_mask & (INT_TXQ0_DONE << qid))) {
		rt_intr_disable(sc, (INT_TXQ0_DONE << qid));
		taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task);
	}

	sc->intr_pending_mask |= (INT_TXQ0_DONE << qid);
	RT_SOFTC_UNLOCK(sc);
}

/*
 * rt_rx_done_task - run RX task
 */
static void
rt_rx_done_task(void *context, int pending)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	int again;

	sc = context;
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_RX, "Rx done task\n");

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	sc->intr_pending_mask &= ~INT_RX_DONE;

	again = rt_rx_eof(sc, sc->rx_process_limit);

	RT_SOFTC_LOCK(sc);

	if ((sc->intr_pending_mask & INT_RX_DONE) || again) {
		RT_DPRINTF(sc, RT_DEBUG_RX,
		    "Rx done task: scheduling again\n");
		taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task);
	} else {
		rt_intr_enable(sc, INT_RX_DONE);
	}

	RT_SOFTC_UNLOCK(sc);
}

/*
 * rt_tx_done_task - check for pending TX task in all queues
 */
static void
rt_tx_done_task(void *context, int pending)
{
	struct rt_softc *sc;
	struct ifnet *ifp;
	uint32_t intr_mask;
	int i;

	sc = context;
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_TX, "Tx done task\n");

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	for (i = RT_SOFTC_TX_RING_COUNT - 1; i >= 0; i--) {
		if (sc->intr_pending_mask & (INT_TXQ0_DONE << i)) {
			sc->intr_pending_mask &= ~(INT_TXQ0_DONE << i);
			rt_tx_eof(sc, &sc->tx_ring[i]);
		}
	}

	sc->tx_timer = 0;

	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	intr_mask = (
		INT_TXQ3_DONE |
		INT_TXQ2_DONE |
		INT_TXQ1_DONE |
		INT_TXQ0_DONE);

	RT_SOFTC_LOCK(sc);

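	/* re-enable only Tx queue interrupts that are masked and not pending */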
	rt_intr_enable(sc, ~sc->intr_pending_mask &
	    (sc->intr_disable_mask & intr_mask));

	if (sc->intr_pending_mask & intr_mask) {
		RT_DPRINTF(sc, RT_DEBUG_TX,
		    "Tx done task: scheduling again\n");
		taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task);
	}

	RT_SOFTC_UNLOCK(sc);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		rt_start(ifp);
}

/*
 * rt_periodic_task - run periodic task
 */
static void
rt_periodic_task(void *context, int pending)
{
	struct rt_softc *sc;
	struct ifnet *ifp;

	sc = context;
	ifp = sc->ifp;

	RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic task: round=%lu\n",
	    sc->periodic_round);

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	RT_SOFTC_LOCK(sc);
	sc->periodic_round++;
	rt_update_stats(sc);

	if ((sc->periodic_round % 10) == 0) {
		rt_update_raw_counters(sc);
		rt_watchdog(sc);
	}

	RT_SOFTC_UNLOCK(sc);
	callout_reset(&sc->periodic_ch, hz / 10, rt_periodic, sc);
}

/*
 * rt_rx_eof - check for frames completed by the DMA engine and pass them
 * into the network subsystem.
 */
static int
rt_rx_eof(struct rt_softc *sc, int limit)
{
	struct ifnet *ifp;
	struct rt_softc_rx_ring *ring;
	struct rt_rxdesc *desc;
	struct rt_softc_rx_data *data;
	struct mbuf *m, *mnew;
	bus_dma_segment_t segs[1];
	bus_dmamap_t dma_map;
	uint32_t index, desc_flags;
	int error, nsegs, len, nframes;

	ifp = sc->ifp;
	ring = &sc->rx_ring;

	nframes = 0;

	while (limit != 0) {
		index = RT_READ(sc, PDMA_BASE + RX_DRX_IDX0);
		if (ring->cur == index)
			break;

		desc = &ring->desc[ring->cur];
		data = &ring->data[ring->cur];

		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

#ifdef IF_RT_DEBUG
		if (sc->debug & RT_DEBUG_RX) {
			printf("\nRX Descriptor[%#08x] dump:\n", (u_int)desc);
			hexdump(desc, 16, 0, 0);
			printf("-----------------------------------\n");
		}
#endif

		/* XXX: Sometimes the device doesn't set the DDONE bit */
#ifdef DDONE_FIXED
		if (!(desc->sdl0 & htole16(RT_RXDESC_SDL0_DDONE))) {
			RT_DPRINTF(sc, RT_DEBUG_RX, "DDONE=0, try next\n");
			break;
		}
#endif

		len = le16toh(desc->sdl0) & 0x3fff;
		RT_DPRINTF(sc, RT_DEBUG_RX, "new frame len=%d\n", len);

		nframes++;

		mnew = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
		    MJUMPAGESIZE);
		if (mnew == NULL) {
			sc->rx_mbuf_alloc_errors++;
			ifp->if_ierrors++;
			goto skip;
		}

		mnew->m_len = mnew->m_pkthdr.len = MJUMPAGESIZE;

		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
		    ring->spare_dma_map, mnew, segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			RT_DPRINTF(sc, RT_DEBUG_RX,
			    "could not load Rx mbuf DMA map: "
			    "error=%d, nsegs=%d\n",
			    error, nsegs);

			m_freem(mnew);

			sc->rx_mbuf_dmamap_errors++;
			ifp->if_ierrors++;

			goto skip;
		}

		KASSERT(nsegs == 1, ("%s: too many DMA segments",
			device_get_nameunit(sc->dev)));

		bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
			BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ring->data_dma_tag, data->dma_map);

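		/* swap the ring slot's DMA map with the freshly loaded spare */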
		dma_map = data->dma_map;
		data->dma_map = ring->spare_dma_map;
		ring->spare_dma_map = dma_map;

		bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
			BUS_DMASYNC_PREREAD);

		m = data->m;
		desc_flags = desc->src;

		data->m = mnew;
		/* Add 2 for proper alignment of the RX IP header */
		desc->sdp0 = htole32(segs[0].ds_addr+2);
		desc->sdl0 = htole32(segs[0].ds_len-2);
		desc->src = 0;
		desc->ai = 0;
		desc->foe = 0;

		RT_DPRINTF(sc, RT_DEBUG_RX,
		    "Rx frame: rxdesc flags=0x%08x\n", desc_flags);

		m->m_pkthdr.rcvif = ifp;
		/* Add 2 to fix data alignment, after sdp0 = addr + 2 */
		m->m_data += 2;
		m->m_pkthdr.len = m->m_len = len;

		/* check for crc errors */
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			/* check for valid checksum */
			if (desc_flags & (RXDSXR_SRC_IP_CSUM_FAIL|
			    RXDSXR_SRC_L4_CSUM_FAIL)) {
				RT_DPRINTF(sc, RT_DEBUG_RX,
				    "rxdesc: crc error\n");

				ifp->if_ierrors++;

				if (!(ifp->if_flags & IFF_PROMISC)) {
					m_freem(m);
					goto skip;
				}
			}
			if ((desc_flags & RXDSXR_SRC_IP_CSUM_FAIL) == 0) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				m->m_pkthdr.csum_data = 0xffff;
			}
			m->m_flags &= ~M_HASFCS;
		}

		(*ifp->if_input)(ifp, m);
skip:
		desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE);

		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		ring->cur = (ring->cur + 1) % RT_SOFTC_RX_RING_DATA_COUNT;

		limit--;
	}

	if (ring->cur == 0)
		RT_WRITE(sc, PDMA_BASE + RX_CALC_IDX0,
			RT_SOFTC_RX_RING_DATA_COUNT - 1);
	else
		RT_WRITE(sc, PDMA_BASE + RX_CALC_IDX0,
			ring->cur - 1);

	RT_DPRINTF(sc, RT_DEBUG_RX, "Rx eof: nframes=%d\n", nframes);

	sc->rx_packets += nframes;

	return (limit == 0);
}

/*
 * rt_tx_eof - check for successfully transmitted frames and mark their
 * descriptors as free.
 */
static void
rt_tx_eof(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
{
	struct ifnet *ifp;
	struct rt_txdesc *desc;
	struct rt_softc_tx_data *data;
	uint32_t index;
	int ndescs, nframes;

	ifp = sc->ifp;

	ndescs = 0;
	nframes = 0;

	for (;;) {
		index = RT_READ(sc, PDMA_BASE + TX_DTX_IDX(ring->qid));
		if (ring->desc_next == index)
			break;

		ndescs++;

		desc = &ring->desc[ring->desc_next];

		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (desc->sdl0 & htole16(RT_TXDESC_SDL0_LASTSEG) ||
			desc->sdl1 & htole16(RT_TXDESC_SDL1_LASTSEG)) {
			nframes++;

			data = &ring->data[ring->data_next];

			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
				BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);

			m_freem(data->m);

			data->m = NULL;

			ifp->if_opackets++;

			RT_SOFTC_TX_RING_LOCK(ring);
			ring->data_queued--;
			ring->data_next = (ring->data_next + 1) %
			    RT_SOFTC_TX_RING_DATA_COUNT;
			RT_SOFTC_TX_RING_UNLOCK(ring);
		}

		desc->sdl0 &= ~htole16(RT_TXDESC_SDL0_DDONE);

		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		RT_SOFTC_TX_RING_LOCK(ring);
		ring->desc_queued--;
		ring->desc_next = (ring->desc_next + 1) %
		    RT_SOFTC_TX_RING_DESC_COUNT;
		RT_SOFTC_TX_RING_UNLOCK(ring);
	}

	RT_DPRINTF(sc, RT_DEBUG_TX,
	    "Tx eof: qid=%d, ndescs=%d, nframes=%d\n", ring->qid, ndescs,
	    nframes);
}

/*
 * rt_update_stats - query statistics counters and update related variables.
 */
static void
rt_update_stats(struct rt_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->ifp;
	RT_DPRINTF(sc, RT_DEBUG_STATS, "updating statistics\n");
	/* XXX do update stats here */
}

/*
 * rt_watchdog - reinit device on watchdog event.
 */
static void
rt_watchdog(struct rt_softc *sc)
{
	uint32_t tmp;
#ifdef notyet
	int ntries;
#endif

	tmp = RT_READ(sc, PSE_BASE + CDMA_OQ_STA);

	RT_DPRINTF(sc, RT_DEBUG_WATCHDOG, "watchdog: CDMA_OQ_STA=0x%08x\n",
	    tmp);

	/* XXX: do not reset */
#ifdef notyet
	if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) != 0) {
		sc->tx_queue_not_empty[0]++;

		for (ntries = 0; ntries < 10; ntries++) {
			tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA);
			if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) == 0)
				break;

			DELAY(1);
		}
	}

	if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) != 0) {
		sc->tx_queue_not_empty[1]++;

		for (ntries = 0; ntries < 10; ntries++) {
			tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA);
			if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) == 0)
				break;

			DELAY(1);
		}
	}
#endif
}

/*
 * rt_update_raw_counters - update counters.
 */
static void
rt_update_raw_counters(struct rt_softc *sc)
{

	sc->tx_bytes	+= RT_READ(sc, CNTR_BASE + GDMA_TX_GBCNT0);
	sc->tx_packets	+= RT_READ(sc, CNTR_BASE + GDMA_TX_GPCNT0);
	sc->tx_skip	+= RT_READ(sc, CNTR_BASE + GDMA_TX_SKIPCNT0);
	sc->tx_collision += RT_READ(sc, CNTR_BASE + GDMA_TX_COLCNT0);

	sc->rx_bytes	+= RT_READ(sc, CNTR_BASE + GDMA_RX_GBCNT0);
	sc->rx_packets	+= RT_READ(sc, CNTR_BASE + GDMA_RX_GPCNT0);
	sc->rx_crc_err	+= RT_READ(sc, CNTR_BASE + GDMA_RX_CSUM_ERCNT0);
	sc->rx_short_err += RT_READ(sc, CNTR_BASE + GDMA_RX_SHORT_ERCNT0);
	sc->rx_long_err	+= RT_READ(sc, CNTR_BASE + GDMA_RX_LONG_ERCNT0);
	sc->rx_phy_err	+= RT_READ(sc, CNTR_BASE + GDMA_RX_FERCNT0);
	sc->rx_fifo_overflows += RT_READ(sc, CNTR_BASE + GDMA_RX_OERCNT0);
}

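/*
 * rt_intr_enable - unmask the given interrupts and reprogram the
 * combined hardware mask.
 */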
static void
rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask)
{
	uint32_t tmp;

	sc->intr_disable_mask &= ~intr_mask;
	tmp = sc->intr_enable_mask & ~sc->intr_disable_mask;
	RT_WRITE(sc, GE_PORT_BASE + FE_INT_ENABLE, tmp);
}

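/*
 * rt_intr_disable - mask the given interrupts and reprogram the
 * combined hardware mask.
 */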
1903static void
1904rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask)
1905{
1906	uint32_t tmp;
1907
1908	sc->intr_disable_mask |= intr_mask;
1909	tmp = sc->intr_enable_mask & ~sc->intr_disable_mask;
1910	RT_WRITE(sc, GE_PORT_BASE + FE_INT_ENABLE, tmp);
1911}
1912
1913/*
1914 * rt_txrx_enable - enable TX/RX DMA
1915 */
1916static int
1917rt_txrx_enable(struct rt_softc *sc)
1918{
1919	struct ifnet *ifp;
1920	uint32_t tmp;
1921	int ntries;
1922
1923	ifp = sc->ifp;
1924
1925	/* enable Tx/Rx DMA engine */
1926	for (ntries = 0; ntries < 200; ntries++) {
1927		tmp = RT_READ(sc, PDMA_BASE + PDMA_GLO_CFG);
1928		if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)))
1929			break;
1930
1931		DELAY(1000);
1932	}
1933
1934	if (ntries == 200) {
1935		device_printf(sc->dev, "timeout waiting for DMA engine\n");
1936		return (-1);
1937	}
1938
1939	DELAY(50);
1940
1941	tmp |= FE_TX_WB_DDONE |	FE_RX_DMA_EN | FE_TX_DMA_EN;
1942	RT_WRITE(sc, PDMA_BASE + PDMA_GLO_CFG, tmp);
1943
1944	/* XXX set Rx filter */
1945	return (0);
1946}
1947
1948/*
1949 * rt_alloc_rx_ring - allocate RX DMA ring buffer
1950 */
1951static int
1952rt_alloc_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
1953{
1954	struct rt_rxdesc *desc;
1955	struct rt_softc_rx_data *data;
1956	bus_dma_segment_t segs[1];
1957	int i, nsegs, error;
1958
	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc), 1,
	    RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc),
	    0, NULL, NULL, &ring->desc_dma_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create Rx desc DMA tag\n");
		goto fail;
	}

	error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate Rx desc DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map,
	    ring->desc,
	    RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc),
	    rt_dma_map_addr, &ring->desc_phys_addr, 0);
	if (error != 0) {
		device_printf(sc->dev, "could not load Rx desc DMA map\n");
		goto fail;
	}

	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, NULL, NULL,
	    &ring->data_dma_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create Rx data DMA tag\n");
		goto fail;
	}

	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
		desc = &ring->desc[i];
		data = &ring->data[i];

		error = bus_dmamap_create(ring->data_dma_tag, 0,
		    &data->dma_map);
		if (error != 0) {
			device_printf(sc->dev,
			    "could not create Rx data DMA map\n");
			goto fail;
		}

		data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
		    MJUMPAGESIZE);
		if (data->m == NULL) {
			device_printf(sc->dev, "could not allocate Rx mbuf\n");
			error = ENOMEM;
			goto fail;
		}

		data->m->m_len = data->m->m_pkthdr.len = MJUMPAGESIZE;

		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
		    data->dma_map, data->m, segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->dev,
			    "could not load Rx mbuf DMA map\n");
			goto fail;
		}

		KASSERT(nsegs == 1, ("%s: too many DMA segments",
		    device_get_nameunit(sc->dev)));

		/*
		 * Shift the buffer start by 2 bytes so the IP header that
		 * follows the 14-byte Ethernet header is 4-byte aligned.
		 */
		desc->sdp0 = htole32(segs[0].ds_addr + 2);
		desc->sdl0 = htole32(segs[0].ds_len - 2);
	}

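	/*
	 * A spare map lets the Rx path load a replacement mbuf before
	 * giving up the old one (presumably swapped with a slot's map in
	 * the Rx completion handler), so a failed allocation never leaves
	 * a ring slot unmapped.
	 */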
	error = bus_dmamap_create(ring->data_dma_tag, 0,
	    &ring->spare_dma_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create Rx spare DMA map\n");
		goto fail;
	}

	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);

fail:
	rt_free_rx_ring(sc, ring);
	return (error);
}

/*
 * rt_reset_rx_ring - reset RX ring buffer
 */
static void
rt_reset_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
{
	struct rt_rxdesc *desc;
	int i;

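	/* Clear the DMA-done bit so the hardware owns each descriptor again. */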
	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
		desc = &ring->desc[i];
		desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE);
	}

	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	ring->cur = 0;
}

/*
 * rt_free_rx_ring - free memory used by RX ring buffer
 */
static void
rt_free_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
{
	struct rt_softc_rx_data *data;
	int i;

	if (ring->desc != NULL) {
		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map);
		bus_dmamem_free(ring->desc_dma_tag, ring->desc,
		    ring->desc_dma_map);
	}

	if (ring->desc_dma_tag != NULL)
		bus_dma_tag_destroy(ring->desc_dma_tag);

	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
			m_freem(data->m);
		}

		if (data->dma_map != NULL)
			bus_dmamap_destroy(ring->data_dma_tag, data->dma_map);
	}

	if (ring->spare_dma_map != NULL)
		bus_dmamap_destroy(ring->data_dma_tag, ring->spare_dma_map);

	if (ring->data_dma_tag != NULL)
		bus_dma_tag_destroy(ring->data_dma_tag);
}

/*
 * rt_alloc_tx_ring - allocate TX ring buffer
 */
static int
rt_alloc_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring, int qid)
{
	struct rt_softc_tx_data *data;
	int error, i;

	mtx_init(&ring->lock, device_get_nameunit(sc->dev), NULL, MTX_DEF);

	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc), 1,
	    RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc),
	    0, NULL, NULL, &ring->desc_dma_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create Tx desc DMA tag\n");
		goto fail;
	}

	error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate Tx desc DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map,
	    ring->desc, (RT_SOFTC_TX_RING_DESC_COUNT *
	    sizeof(struct rt_txdesc)), rt_dma_map_addr,
	    &ring->desc_phys_addr, 0);
	if (error != 0) {
		device_printf(sc->dev, "could not load Tx desc DMA map\n");
		goto fail;
	}

	ring->desc_queued = 0;
	ring->desc_cur = 0;
	ring->desc_next = 0;

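	/*
	 * seg0 is a single contiguous staging buffer providing
	 * RT_TX_DATA_SEG0_SIZE bytes per data slot; presumably the start
	 * of each frame is copied here so the first Tx segment handed to
	 * the hardware is always physically contiguous.
	 */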
	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE, 1,
	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE,
	    0, NULL, NULL, &ring->seg0_dma_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create Tx seg0 DMA tag\n");
		goto fail;
	}

	error = bus_dmamem_alloc(ring->seg0_dma_tag, (void **) &ring->seg0,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->seg0_dma_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate Tx seg0 DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(ring->seg0_dma_tag, ring->seg0_dma_map,
	    ring->seg0,
	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE,
	    rt_dma_map_addr, &ring->seg0_phys_addr, 0);
	if (error != 0) {
		device_printf(sc->dev, "could not load Tx seg0 DMA map\n");
		goto fail;
	}

	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    MJUMPAGESIZE, RT_SOFTC_MAX_SCATTER, MJUMPAGESIZE, 0, NULL, NULL,
	    &ring->data_dma_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create Tx data DMA tag\n");
		goto fail;
	}

	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
		data = &ring->data[i];

		error = bus_dmamap_create(ring->data_dma_tag, 0,
		    &data->dma_map);
		if (error != 0) {
			device_printf(sc->dev,
			    "could not create Tx data DMA map\n");
			goto fail;
		}
	}

	ring->data_queued = 0;
	ring->data_cur = 0;
	ring->data_next = 0;

	ring->qid = qid;
	return (0);

fail:
	rt_free_tx_ring(sc, ring);
	return (error);
}

/*
 * rt_reset_tx_ring - reset TX ring buffer to empty state
 */
static void
rt_reset_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
{
	struct rt_softc_tx_data *data;
	struct rt_txdesc *desc;
	int i;

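	/* Zero sdl0/sdl1 in every descriptor to mark the ring empty. */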
	for (i = 0; i < RT_SOFTC_TX_RING_DESC_COUNT; i++) {
		desc = &ring->desc[i];

		desc->sdl0 = 0;
		desc->sdl1 = 0;
	}

	ring->desc_queued = 0;
	ring->desc_cur = 0;
	ring->desc_next = 0;

	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
	    BUS_DMASYNC_PREWRITE);

	bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
	    BUS_DMASYNC_PREWRITE);

	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	ring->data_queued = 0;
	ring->data_cur = 0;
	ring->data_next = 0;
}

/*
 * rt_free_tx_ring - free TX ring buffer
 */
static void
rt_free_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
{
	struct rt_softc_tx_data *data;
	int i;

	if (ring->desc != NULL) {
		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map);
		bus_dmamem_free(ring->desc_dma_tag, ring->desc,
		    ring->desc_dma_map);
	}

	if (ring->desc_dma_tag != NULL)
		bus_dma_tag_destroy(ring->desc_dma_tag);

	if (ring->seg0 != NULL) {
		bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->seg0_dma_tag, ring->seg0_dma_map);
		bus_dmamem_free(ring->seg0_dma_tag, ring->seg0,
		    ring->seg0_dma_map);
	}

	if (ring->seg0_dma_tag != NULL)
		bus_dma_tag_destroy(ring->seg0_dma_tag);

	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
			m_freem(data->m);
		}

		if (data->dma_map != NULL)
			bus_dmamap_destroy(ring->data_dma_tag, data->dma_map);
	}

	if (ring->data_dma_tag != NULL)
		bus_dma_tag_destroy(ring->data_dma_tag);

	mtx_destroy(&ring->lock);
}

/*
 * rt_dma_map_addr - get address of busdma segment
 */
static void
rt_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	if (error != 0)
		return;

	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));

	*(bus_addr_t *) arg = segs[0].ds_addr;
}

/*
 * rt_sysctl_attach - attach sysctl nodes for NIC counters
 */
static void
rt_sysctl_attach(struct rt_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid *stats;

	ctx = device_get_sysctl_ctx(sc->dev);
	tree = device_get_sysctl_tree(sc->dev);

	/* statistic counters */
	stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "stats", CTLFLAG_RD, 0, "statistic counters");

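	/*
	 * The read-only counters below land under the device's sysctl
	 * tree and can be read from userland; e.g., assuming the device
	 * attached as rt0:
	 *
	 *   sysctl dev.rt.0.stats.rx_packets
	 */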
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "interrupts", CTLFLAG_RD, &sc->interrupts, 0,
	    "all interrupts");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "tx_coherent_interrupts", CTLFLAG_RD, &sc->tx_coherent_interrupts,
	    0, "Tx coherent interrupts");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_coherent_interrupts", CTLFLAG_RD, &sc->rx_coherent_interrupts,
	    0, "Rx coherent interrupts");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_interrupts", CTLFLAG_RD, &sc->rx_interrupts, 0,
	    "Rx interrupts");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_delay_interrupts", CTLFLAG_RD, &sc->rx_delay_interrupts, 0,
	    "Rx delay interrupts");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ3_interrupts", CTLFLAG_RD, &sc->tx_interrupts[3], 0,
	    "Tx AC3 interrupts");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ2_interrupts", CTLFLAG_RD, &sc->tx_interrupts[2], 0,
	    "Tx AC2 interrupts");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ1_interrupts", CTLFLAG_RD, &sc->tx_interrupts[1], 0,
	    "Tx AC1 interrupts");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ0_interrupts", CTLFLAG_RD, &sc->tx_interrupts[0], 0,
	    "Tx AC0 interrupts");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "tx_delay_interrupts", CTLFLAG_RD, &sc->tx_delay_interrupts,
	    0, "Tx delay interrupts");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ3_desc_queued", CTLFLAG_RD, &sc->tx_ring[3].desc_queued,
	    0, "Tx AC3 descriptors queued");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ3_data_queued", CTLFLAG_RD, &sc->tx_ring[3].data_queued,
	    0, "Tx AC3 data queued");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ2_desc_queued", CTLFLAG_RD, &sc->tx_ring[2].desc_queued,
	    0, "Tx AC2 descriptors queued");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ2_data_queued", CTLFLAG_RD, &sc->tx_ring[2].data_queued,
	    0, "Tx AC2 data queued");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ1_desc_queued", CTLFLAG_RD, &sc->tx_ring[1].desc_queued,
	    0, "Tx AC1 descriptors queued");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ1_data_queued", CTLFLAG_RD, &sc->tx_ring[1].data_queued,
	    0, "Tx AC1 data queued");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ0_desc_queued", CTLFLAG_RD, &sc->tx_ring[0].desc_queued,
	    0, "Tx AC0 descriptors queued");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ0_data_queued", CTLFLAG_RD, &sc->tx_ring[0].data_queued,
	    0, "Tx AC0 data queued");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ3_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[3],
	    0, "Tx AC3 data queue full");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ2_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[2],
	    0, "Tx AC2 data queue full");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ1_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[1],
	    0, "Tx AC1 data queue full");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ0_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[0],
	    0, "Tx AC0 data queue full");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "tx_watchdog_timeouts", CTLFLAG_RD, &sc->tx_watchdog_timeouts,
	    0, "Tx watchdog timeouts");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "tx_defrag_packets", CTLFLAG_RD, &sc->tx_defrag_packets, 0,
	    "Tx defragmented packets");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "no_tx_desc_avail", CTLFLAG_RD, &sc->no_tx_desc_avail, 0,
	    "no Tx descriptors available");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_mbuf_alloc_errors", CTLFLAG_RD, &sc->rx_mbuf_alloc_errors,
	    0, "Rx mbuf allocation errors");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_mbuf_dmamap_errors", CTLFLAG_RD, &sc->rx_mbuf_dmamap_errors,
	    0, "Rx mbuf DMA mapping errors");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "tx_queue_0_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[0],
	    0, "Tx queue 0 not empty");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "tx_queue_1_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[1],
	    0, "Tx queue 1 not empty");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_packets", CTLFLAG_RD, &sc->rx_packets, 0,
	    "Rx packets");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_crc_errors", CTLFLAG_RD, &sc->rx_crc_err, 0,
	    "Rx CRC errors");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_phy_errors", CTLFLAG_RD, &sc->rx_phy_err, 0,
	    "Rx PHY errors");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_dup_packets", CTLFLAG_RD, &sc->rx_dup_packets, 0,
	    "Rx duplicate packets");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_fifo_overflows", CTLFLAG_RD, &sc->rx_fifo_overflows, 0,
	    "Rx FIFO overflows");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_bytes", CTLFLAG_RD, &sc->rx_bytes, 0,
	    "Rx bytes");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_long_err", CTLFLAG_RD, &sc->rx_long_err, 0,
	    "Rx too long frame errors");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_short_err", CTLFLAG_RD, &sc->rx_short_err, 0,
	    "Rx too short frame errors");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "tx_bytes", CTLFLAG_RD, &sc->tx_bytes, 0,
	    "Tx bytes");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "tx_packets", CTLFLAG_RD, &sc->tx_packets, 0,
	    "Tx packets");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "tx_skip", CTLFLAG_RD, &sc->tx_skip, 0,
	    "Tx skip count for GDMA ports");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "tx_collision", CTLFLAG_RD, &sc->tx_collision, 0,
	    "Tx collision count for GDMA ports");
}

#ifdef IF_RT_PHY_SUPPORT
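/*
 * MDIO transactions go through the single MDIO_ACCESS register: the PHY
 * and register addresses are merged into one command word, MDIO_CMD_ONGO
 * starts the cycle, and the same register holds the read data once the
 * ONGO bit clears.
 */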
static int
rt_miibus_readreg(device_t dev, int phy, int reg)
{
	struct rt_softc *sc = device_get_softc(dev);

	/*
	 * PSEUDO_PHYAD is a special address used to indicate that a
	 * switch is attached; no real PHY responds at the pseudo
	 * address (0x1e).
	 */
	if (phy == 31) {
		/*
		 * Return a faked PHY ID so that the attached switch is
		 * detected as a bfeswitch.
		 */
		switch (reg) {
		case MII_BMSR:
			return (BMSR_EXTSTAT | BMSR_MEDIAMASK);
		case MII_PHYIDR1:
			return (0x40);
		case MII_PHYIDR2:
			return (0x6250);
		}
	}

	/* Wait for any previous command to complete */
	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
	RT_WRITE(sc, MDIO_ACCESS,
	    MDIO_CMD_ONGO |
	    ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) |
	    ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK));
	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);

	return (RT_READ(sc, MDIO_ACCESS) & MDIO_PHY_DATA_MASK);
}

static int
rt_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct rt_softc *sc = device_get_softc(dev);

	/* Wait for any previous command to complete */
	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
	RT_WRITE(sc, MDIO_ACCESS,
	    MDIO_CMD_ONGO | MDIO_CMD_WR |
	    ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) |
	    ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK) |
	    (val & MDIO_PHY_DATA_MASK));
	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);

	return (0);
}

void
rt_miibus_statchg(device_t dev)
{
	struct rt_softc *sc = device_get_softc(dev);
	struct mii_data *mii;

	mii = device_get_softc(sc->rt_miibus);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			/* XXX check link here */
			sc->flags |= 1;
			break;
		default:
			break;
		}
	}
}
#endif /* IF_RT_PHY_SUPPORT */

static device_method_t rt_dev_methods[] =
{
	DEVMETHOD(device_probe, rt_probe),
	DEVMETHOD(device_attach, rt_attach),
	DEVMETHOD(device_detach, rt_detach),
	DEVMETHOD(device_shutdown, rt_shutdown),
	DEVMETHOD(device_suspend, rt_suspend),
	DEVMETHOD(device_resume, rt_resume),

#ifdef IF_RT_PHY_SUPPORT
	/* MII interface */
	DEVMETHOD(miibus_readreg,	rt_miibus_readreg),
	DEVMETHOD(miibus_writereg,	rt_miibus_writereg),
	DEVMETHOD(miibus_statchg,	rt_miibus_statchg),
#endif

	DEVMETHOD_END
};

static driver_t rt_driver =
{
	"rt",
	rt_dev_methods,
	sizeof(struct rt_softc)
};

static devclass_t rt_dev_class;

DRIVER_MODULE(rt, nexus, rt_driver, rt_dev_class, 0, 0);
MODULE_DEPEND(rt, ether, 1, 1, 1);
MODULE_DEPEND(rt, miibus, 1, 1, 1);