if_rt.c revision 267992
/*-
 * Copyright (c) 2011, Aleksandr Rybalko
 * based on hard work
 * by Alexander Egorenkov <egorenar@gmail.com>
 * and by Damien Bergamini <damien.bergamini@free.fr>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
301541Srgrimes
311541Srgrimes#include <sys/cdefs.h>
321541Srgrimes__FBSDID("$FreeBSD: head/sys/dev/rt/if_rt.c 267992 2014-06-28 03:56:17Z hselasky $");
331541Srgrimes
341541Srgrimes#include "if_rtvar.h"
351541Srgrimes#include "if_rtreg.h"
361541Srgrimes
37116182Sobrien#include <net/if.h>
38116182Sobrien#include <net/if_var.h>
39116182Sobrien#include <net/if_arp.h>
4052778Smsmith#include <net/ethernet.h>
4199264Smux#include <net/if_dl.h>
42177785Skib#include <net/if_media.h>
43125340Spjd#include <net/if_types.h>
4431403Sjulian#include <net/if_vlan_var.h>
45141206Spjd
4699264Smux#include <net/bpf.h>
4799264Smux
4876166Smarkm#include <machine/bus.h>
4999264Smux#include <machine/cache.h>
50164033Srwatson#include <machine/cpufunc.h>
5199264Smux#include <machine/resource.h>
52108524Salfred#include <vm/vm_param.h>
5399264Smux#include <vm/vm.h>
54223919Sae#include <vm/pmap.h>
55138509Sphk#include <machine/pmap.h>
5699264Smux#include <sys/bus.h>
5799264Smux#include <sys/rman.h>
5899264Smux
5999264Smux#include <dev/mii/mii.h>
6099264Smux#include <dev/mii/miivar.h>
6199423Sjeff
62157322Sjeff#include <mips/rt305x/rt305x_sysctlvar.h>
631541Srgrimes#include <mips/rt305x/rt305xreg.h>
64105948Sphk
65105948Sphk#ifdef IF_RT_PHY_SUPPORT
6699264Smux#include "miibus_if.h"
6799264Smux#endif
68159274Srwatson
69163606Srwatson/*
70159274Srwatson * Defines and macros
71122640Skan */
7276166Smarkm#define	RT_MAX_AGG_SIZE			3840
73230725Smckusick
74230725Smckusick#define	RT_TX_DATA_SEG0_SIZE		MJUMPAGESIZE
75141634Sphk
7699264Smux#define	RT_MS(_v, _f)			(((_v) & _f) >> _f##_S)
77127476Spjd#define	RT_SM(_v, _f)			(((_v) << _f##_S) & _f)
78127476Spjd
79127476Spjd#define	RT_TX_WATCHDOG_TIMEOUT		5
8099264Smux
8152778Smsmith/*
82157322Sjeff * Static function prototypes
8322521Sdyson */
8499264Smuxstatic int	rt_probe(device_t dev);
8599264Smuxstatic int	rt_attach(device_t dev);
8630354Sphkstatic int	rt_detach(device_t dev);
8799264Smuxstatic int	rt_shutdown(device_t dev);
8899264Smuxstatic int	rt_suspend(device_t dev);
89145249Sphkstatic int	rt_resume(device_t dev);
9099264Smuxstatic void	rt_init_locked(void *priv);
9191690Seivindstatic void	rt_init(void *priv);
92138467Sphkstatic void	rt_stop_locked(void *priv);
93138467Sphkstatic void	rt_stop(void *priv);
94138467Sphkstatic void	rt_start(struct ifnet *ifp);
95153225Srodrigcstatic int	rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
96138467Sphkstatic void	rt_periodic(void *arg);
97138467Sphkstatic void	rt_tx_watchdog(void *arg);
98138467Sphkstatic void	rt_intr(void *arg);
99153034Srodrigcstatic void	rt_tx_coherent_intr(struct rt_softc *sc);
100171852Sjhbstatic void	rt_rx_coherent_intr(struct rt_softc *sc);
101171852Sjhbstatic void	rt_rx_delay_intr(struct rt_softc *sc);
102138467Sphkstatic void	rt_tx_delay_intr(struct rt_softc *sc);
103138467Sphkstatic void	rt_rx_intr(struct rt_softc *sc);
104138467Sphkstatic void	rt_tx_intr(struct rt_softc *sc, int qid);
105213365Smarcelstatic void	rt_rx_done_task(void *context, int pending);
106213365Smarcelstatic void	rt_tx_done_task(void *context, int pending);
107213365Smarcelstatic void	rt_periodic_task(void *context, int pending);
108213365Smarcelstatic int	rt_rx_eof(struct rt_softc *sc, int limit);
10952854Smsmithstatic void	rt_tx_eof(struct rt_softc *sc,
110213365Smarcel		    struct rt_softc_tx_ring *ring);
111213365Smarcelstatic void	rt_update_stats(struct rt_softc *sc);
112213365Smarcelstatic void	rt_watchdog(struct rt_softc *sc);
113213365Smarcelstatic void	rt_update_raw_counters(struct rt_softc *sc);
114213365Smarcelstatic void	rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask);
11510358Sjulianstatic void	rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask);
116213365Smarcelstatic int	rt_txrx_enable(struct rt_softc *sc);
117213365Smarcelstatic int	rt_alloc_rx_ring(struct rt_softc *sc,
118213365Smarcel		    struct rt_softc_rx_ring *ring);
119213365Smarcelstatic void	rt_reset_rx_ring(struct rt_softc *sc,
120213365Smarcel		    struct rt_softc_rx_ring *ring);
121213365Smarcelstatic void	rt_free_rx_ring(struct rt_softc *sc,
122213365Smarcel		    struct rt_softc_rx_ring *ring);
123213365Smarcelstatic int	rt_alloc_tx_ring(struct rt_softc *sc,
124213365Smarcel		    struct rt_softc_tx_ring *ring, int qid);
125213365Smarcelstatic void	rt_reset_tx_ring(struct rt_softc *sc,
126213365Smarcel		    struct rt_softc_tx_ring *ring);
127213365Smarcelstatic void	rt_free_tx_ring(struct rt_softc *sc,
128213365Smarcel		    struct rt_softc_tx_ring *ring);
129213365Smarcelstatic void	rt_dma_map_addr(void *arg, bus_dma_segment_t *segs,
130213365Smarcel		    int nseg, int error);
131213365Smarcelstatic void	rt_sysctl_attach(struct rt_softc *sc);
132213365Smarcel#ifdef IF_RT_PHY_SUPPORT
133213365Smarcelvoid		rt_miibus_statchg(device_t);
134213365Smarcelstatic int	rt_miibus_readreg(device_t, int, int);
135138509Sphkstatic int	rt_miibus_writereg(device_t, int, int, int);
136138509Sphk#endif
137138509Sphkstatic int	rt_ifmedia_upd(struct ifnet *);
138138509Sphkstatic void	rt_ifmedia_sts(struct ifnet *, struct ifmediareq *);
139138509Sphk
140100363Smuxstatic SYSCTL_NODE(_hw, OID_AUTO, rt, CTLFLAG_RD, 0, "RT driver parameters");
14199264Smux#ifdef IF_RT_DEBUG
142100363Smuxstatic int rt_debug = 0;
143100363SmuxSYSCTL_INT(_hw_rt, OID_AUTO, debug, CTLFLAG_RWTUN, &rt_debug, 0,
144100363Smux    "RT debug level");
145100363Smux#endif
146100363Smux
147100363Smuxstatic int
148100363Smuxrt_probe(device_t dev)
149100363Smux{
150100363Smux	device_set_desc(dev, "Ralink RT305XF onChip Ethernet MAC");
151100363Smux	return (BUS_PROBE_NOWILDCARD);
152100363Smux}
153168185Spjd
/*
 * macaddr_atoi - translate string MAC address to uint8_t array.
 *
 * Accepts six hex octets separated by any single character
 * (e.g. "00:18:e7:d5:83:90").  Returns 0 on success; on parse
 * failure zeroes the output array and returns 1.
 */
static int
macaddr_atoi(const char *str, uint8_t *mac)
{
	int count, i;
	unsigned int amac[ETHER_ADDR_LEN];	/* Aligned version */

	count = sscanf(str, "%x%*c%x%*c%x%*c%x%*c%x%*c%x",
	    &amac[0], &amac[1], &amac[2],
	    &amac[3], &amac[4], &amac[5]);
	if (count < ETHER_ADDR_LEN) {
		memset(mac, 0, ETHER_ADDR_LEN);
		return (1);
	}

	/* Copy aligned to result */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mac[i] = (amac[i] & 0xff);

	return (0);
}
177165288Srodrigc
178219925Sjh#ifdef USE_GENERATED_MAC_ADDRESS
/*
 * kernenv_next - advance to the next string in a static kernel
 * environment, which is a sequence of NUL-terminated strings ending
 * with an extra empty string.  Returns NULL at the end of the
 * environment, or if cp is NULL.
 */
static char *
kernenv_next(char *cp)
{

	if (cp != NULL) {
		while (*cp != 0)
			cp++;
		cp++;
		/* An empty string terminates the environment. */
		if (*cp == 0)
			cp = NULL;
	}
	return (cp);
}
192219925Sjh
193219925Sjh/*
194220040Sjh * generate_mac(uin8_t *mac)
195219925Sjh * This is MAC address generator for cases when real device MAC address
196219925Sjh * unknown or not yet accessible.
19799264Smux * Use 'b','s','d' signature and 3 octets from CRC32 on kenv.
198127476Spjd * MAC = 'b', 's', 'd', CRC[3]^CRC[2], CRC[1], CRC[0]
199127476Spjd *
200127476Spjd * Output - MAC address, that do not change between reboots, if hints or
201127476Spjd * bootloader info unchange.
202127476Spjd */
203189290Sjamiestatic void
204127476Spjdgenerate_mac(uint8_t *mac)
205127476Spjd{
206127476Spjd	unsigned char *cp;
207127476Spjd	int i = 0;
208127476Spjd	uint32_t crc = 0xffffffff;
209127476Spjd
210127476Spjd	/* Generate CRC32 on kenv */
211127476Spjd	if (dynamic_kenv) {
212127476Spjd		for (cp = kenvp[0]; cp != NULL; cp = kenvp[++i]) {
213127476Spjd			crc = calculate_crc32c(crc, cp, strlen(cp) + 1);
214189290Sjamie		}
215189290Sjamie	} else {
216189290Sjamie		for (cp = kern_envp; cp != NULL; cp = kernenv_next(cp)) {
217189290Sjamie			crc = calculate_crc32c(crc, cp, strlen(cp) + 1);
218189290Sjamie		}
219189290Sjamie	}
220189290Sjamie	crc = ~crc;
221189290Sjamie
222189290Sjamie	mac[0] = 'b';
223189290Sjamie	mac[1] = 's';
224189290Sjamie	mac[2] = 'd';
225219925Sjh	mac[3] = (crc >> 24) ^ ((crc >> 16) & 0xff);
226219925Sjh	mac[4] = (crc >> 8) & 0xff;
227219925Sjh	mac[5] = crc & 0xff;
228219925Sjh}
229127476Spjd#endif
230127476Spjd
231127476Spjd/*
232127476Spjd * ether_request_mac - try to find usable MAC address.
233100363Smux */
234100363Smuxstatic int
235100363Smuxether_request_mac(device_t dev, uint8_t *mac)
236100363Smux{
237100363Smux	char *var;
238100363Smux
239100363Smux	/*
240100363Smux	 * "ethaddr" is passed via envp on RedBoot platforms
241100363Smux	 * "kmac" is passed via argv on RouterBOOT platforms
242100363Smux	 */
243100363Smux#if defined(__U_BOOT__) ||  defined(__REDBOOT__) || defined(__ROUTERBOOT__)
244100363Smux	if ((var = getenv("ethaddr")) != NULL ||
245127476Spjd	    (var = getenv("kmac")) != NULL ) {
246100363Smux
247100363Smux		if(!macaddr_atoi(var, mac)) {
248100363Smux			printf("%s: use %s macaddr from KENV\n",
249100363Smux			    device_get_nameunit(dev), var);
250100363Smux			freeenv(var);
251100363Smux			return (0);
252100363Smux		}
253100363Smux		freeenv(var);
254100363Smux	}
255100363Smux#endif
256100363Smux
25799264Smux	/*
25899264Smux	 * Try from hints
259189290Sjamie	 * hint.[dev].[unit].macaddr
26099264Smux	 */
26199264Smux	if (!resource_string_value(device_get_name(dev),
26299264Smux	    device_get_unit(dev), "macaddr", (const char **)&var)) {
26399264Smux
264189290Sjamie		if(!macaddr_atoi(var, mac)) {
26599264Smux			printf("%s: use %s macaddr from hints\n",
266189290Sjamie			    device_get_nameunit(dev), var);
26799264Smux			return (0);
268111119Simp		}
26999264Smux	}
270122640Skan
271122640Skan#ifdef USE_GENERATED_MAC_ADDRESS
27299264Smux	generate_mac(mac);
27399264Smux
27499264Smux	device_printf(dev, "use generated %02x:%02x:%02x:%02x:%02x:%02x "
275122640Skan	    "macaddr\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
276122523Skan#else
277122523Skan	/* Hardcoded */
278122640Skan	mac[0] = 0x00;
279122523Skan	mac[1] = 0x18;
280122523Skan	mac[2] = 0xe7;
281122523Skan	mac[3] = 0xd5;
282122523Skan	mac[4] = 0x83;
283122523Skan	mac[5] = 0x90;
284122523Skan
285122523Skan	device_printf(dev, "use hardcoded 00:18:e7:d5:83:90 macaddr\n");
286122523Skan#endif
287189290Sjamie
288189290Sjamie	return (0);
289189290Sjamie}
290189290Sjamie
291189290Sjamiestatic int
292189290Sjamiert_attach(device_t dev)
293189290Sjamie{
294189290Sjamie	struct rt_softc *sc;
295189290Sjamie	struct ifnet *ifp;
296189290Sjamie	int error, i;
297189290Sjamie
298189290Sjamie	sc = device_get_softc(dev);
299189290Sjamie	sc->dev = dev;
300100631Smux
301100631Smux	mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK,
302100631Smux	    MTX_DEF | MTX_RECURSE);
303100631Smux
304100631Smux	sc->mem_rid = 0;
305100631Smux	sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
306100631Smux	    RF_ACTIVE);
307100631Smux	if (sc->mem == NULL) {
308122640Skan		device_printf(dev, "could not allocate memory resource\n");
309189290Sjamie		error = ENXIO;
310122640Skan		goto fail;
311122640Skan	}
312122523Skan
313100631Smux	sc->bst = rman_get_bustag(sc->mem);
314134827Salfred	sc->bsh = rman_get_bushandle(sc->mem);
315111119Simp
316100363Smux	sc->irq_rid = 0;
317100363Smux	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
318100363Smux	    RF_ACTIVE);
319100363Smux	if (sc->irq == NULL) {
32099264Smux		device_printf(dev,
32199264Smux		    "could not allocate interrupt resource\n");
322100363Smux		error = ENXIO;
323100363Smux		goto fail;
324100363Smux	}
32599264Smux
32699264Smux#ifdef IF_RT_DEBUG
327100363Smux	sc->debug = rt_debug;
32899264Smux
32999264Smux	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
33099264Smux		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
33199264Smux		"debug", CTLFLAG_RW, &sc->debug, 0, "rt debug level");
33299264Smux#endif
33399264Smux
33499264Smux	device_printf(dev, "RT305XF Ethernet MAC (rev 0x%08x)\n",
33599264Smux	    sc->mac_rev);
336100363Smux
337100363Smux	/* Reset hardware */
338176383Syar	RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);
339220937Sjh
340220937Sjh	RT_WRITE(sc, GDMA1_BASE + GDMA_FWD_CFG,
341220937Sjh	    (
342100363Smux	    GDM_ICS_EN | /* Enable IP Csum */
343100363Smux	    GDM_TCS_EN | /* Enable TCP Csum */
344220937Sjh	    GDM_UCS_EN | /* Enable UDP Csum */
345100363Smux	    GDM_STRPCRC | /* Strip CRC from packet */
346220937Sjh	    GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* Forward UCast to CPU */
347100363Smux	    GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* Forward BCast to CPU */
348220937Sjh	    GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* Forward MCast to CPU */
349111119Simp	    GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* Forward Other to CPU */
350220937Sjh	    ));
351100363Smux
352111119Simp	/* allocate Tx and Rx rings */
353100363Smux	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
354220937Sjh		error = rt_alloc_tx_ring(sc, &sc->tx_ring[i], i);
355100363Smux		if (error != 0) {
356100363Smux			device_printf(dev, "could not allocate Tx ring #%d\n",
357189290Sjamie			    i);
358220937Sjh			goto fail;
359100363Smux		}
360220937Sjh	}
361100363Smux
362100363Smux	sc->tx_ring_mgtqid = 5;
363100363Smux
364167232Srwatson	error = rt_alloc_rx_ring(sc, &sc->rx_ring);
36599264Smux	if (error != 0) {
36699264Smux		device_printf(dev, "could not allocate Rx ring\n");
367225617Skmacy		goto fail;
36899264Smux	}
36999264Smux
370107850Salfred	callout_init(&sc->periodic_ch, 0);
371107850Salfred	callout_init_mtx(&sc->tx_watchdog_ch, &sc->lock, 0);
372107850Salfred
37399264Smux	ifp = sc->ifp = if_alloc(IFT_ETHER);
37499264Smux	if (ifp == NULL) {
375131897Sphk		device_printf(dev, "could not if_alloc()\n");
37699264Smux		error = ENOMEM;
377131897Sphk		goto fail;
378230725Smckusick	}
37999264Smux
380230725Smckusick	ifp->if_softc = sc;
381230725Smckusick	if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
382230725Smckusick	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
383230725Smckusick	ifp->if_init = rt_init;
384230725Smckusick	ifp->if_ioctl = rt_ioctl;
385230725Smckusick	ifp->if_start = rt_start;
386230725Smckusick#define	RT_TX_QLEN	256
387230725Smckusick
388188150Sattilio	IFQ_SET_MAXLEN(&ifp->if_snd, RT_TX_QLEN);
389230725Smckusick	ifp->if_snd.ifq_drv_maxlen = RT_TX_QLEN;
390159274Srwatson	IFQ_SET_READY(&ifp->if_snd);
391173064Srodrigc
392173064Srodrigc#ifdef IF_RT_PHY_SUPPORT
393173064Srodrigc	error = mii_attach(dev, &sc->rt_miibus, ifp, rt_ifmedia_upd,
394173064Srodrigc	    rt_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
395215747Spluknet	if (error != 0) {
396215747Spluknet		device_printf(dev, "attaching PHYs failed\n");
397173064Srodrigc		error = ENXIO;
398230725Smckusick		goto fail;
399138357Sphk	}
400107849Salfred#else
40199264Smux	ifmedia_init(&sc->rt_ifmedia, 0, rt_ifmedia_upd, rt_ifmedia_sts);
40299264Smux	ifmedia_add(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0,
40399264Smux	    NULL);
40499264Smux	ifmedia_set(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX);
405188150Sattilio
406188150Sattilio#endif /* IF_RT_PHY_SUPPORT */
407188150Sattilio
40899264Smux	ether_request_mac(dev, sc->mac_addr);
409188150Sattilio	ether_ifattach(ifp, sc->mac_addr);
410138357Sphk
411131897Sphk	/*
412188150Sattilio	 * Tell the upper layer(s) we support long frames.
413188150Sattilio	 */
414188150Sattilio	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
415131897Sphk	ifp->if_capabilities |= IFCAP_VLAN_MTU;
416188150Sattilio	ifp->if_capenable |= IFCAP_VLAN_MTU;
417230725Smckusick	ifp->if_capabilities |= IFCAP_RXCSUM|IFCAP_TXCSUM;
418152217Srodrigc	ifp->if_capenable |= IFCAP_RXCSUM|IFCAP_TXCSUM;
419131897Sphk
42099264Smux	/* init task queue */
42199264Smux	TASK_INIT(&sc->rx_done_task, 0, rt_rx_done_task, sc);
42299264Smux	TASK_INIT(&sc->tx_done_task, 0, rt_tx_done_task, sc);
42399264Smux	TASK_INIT(&sc->periodic_task, 0, rt_periodic_task, sc);
424138509Sphk
425138509Sphk	sc->rx_process_limit = 100;
426138509Sphk
427138509Sphk	sc->taskqueue = taskqueue_create("rt_taskq", M_NOWAIT,
428155386Sjeff	    taskqueue_thread_enqueue, &sc->taskqueue);
429155386Sjeff
430155386Sjeff	taskqueue_start_threads(&sc->taskqueue, 1, PI_NET, "%s taskq",
431155386Sjeff	    device_get_nameunit(sc->dev));
432188150Sattilio
433155386Sjeff	rt_sysctl_attach(sc);
434155386Sjeff
435155386Sjeff	/* set up interrupt */
436155386Sjeff	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
437155386Sjeff	    NULL, rt_intr, sc, &sc->irqh);
438155386Sjeff	if (error != 0) {
439155386Sjeff		printf("%s: could not set up interrupt\n",
440155386Sjeff			device_get_nameunit(dev));
441155386Sjeff		goto fail;
442188150Sattilio	}
443155386Sjeff#ifdef IF_RT_DEBUG
444155386Sjeff	device_printf(dev, "debug var at %#08x\n", (u_int)&(sc->debug));
445155386Sjeff#endif
446155386Sjeff
447155386Sjeff	return (0);
448138509Sphk
449122523Skanfail:
45099264Smux	/* free Tx and Rx rings */
451168823Spjd	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
452182542Sattilio		rt_free_tx_ring(sc, &sc->tx_ring[i]);
453182542Sattilio
45499264Smux	rt_free_rx_ring(sc, &sc->rx_ring);
45599264Smux
456122523Skan	mtx_destroy(&sc->lock);
457157322Sjeff
458157322Sjeff	if (sc->mem != NULL)
459157322Sjeff		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
460122523Skan		    sc->mem);
461122523Skan
462235626Smckusick	if (sc->irq != NULL)
463235626Smckusick		bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid,
464157322Sjeff		    sc->irq);
465184554Sattilio
466122523Skan	return (error);
467122523Skan}
468155386Sjeff
469122523Skan/*
470162983Skib * Set media options.
471122523Skan */
472122523Skanstatic int
473182542Sattiliort_ifmedia_upd(struct ifnet *ifp)
474182542Sattilio{
475122523Skan	struct rt_softc *sc;
476122523Skan#ifdef IF_RT_PHY_SUPPORT
477122523Skan	struct mii_data *mii;
478172930Srwatson	struct mii_softc *miisc;
479182542Sattilio	int error = 0;
480122523Skan
481143680Sphk	sc = ifp->if_softc;
482244652Skib	RT_SOFTC_LOCK(sc);
483153546Spjd
484122523Skan	mii = device_get_softc(sc->rt_miibus);
485122523Skan	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
486122640Skan		PHY_RESET(miisc);
487122640Skan	error = mii_mediachg(mii);
488122640Skan	RT_SOFTC_UNLOCK(sc);
489168185Spjd
490159181Spjd	return (error);
491122523Skan
492122523Skan#else /* !IF_RT_PHY_SUPPORT */
493154152Stegge
494186197Sattilio	struct ifmedia *ifm;
495186197Sattilio	struct ifmedia_entry *ife;
496186197Sattilio
497186197Sattilio	sc = ifp->if_softc;
498186197Sattilio	ifm = &sc->rt_ifmedia;
499184554Sattilio	ife = ifm->ifm_cur;
500184554Sattilio
501186197Sattilio	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
502186197Sattilio		return (EINVAL);
503186197Sattilio
504186197Sattilio	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
505186197Sattilio		device_printf(sc->dev,
506186197Sattilio		    "AUTO is not supported for multiphy MAC");
507186197Sattilio		return (EINVAL);
508122523Skan	}
509157343Sjeff
510157343Sjeff	/*
511157343Sjeff	 * Ignore everything
512157343Sjeff	 */
513157343Sjeff	return (0);
514122523Skan#endif /* IF_RT_PHY_SUPPORT */
515157343Sjeff}
516244652Skib
517154152Stegge/*
518154152Stegge * Report current media status.
519235626Smckusick */
520235626Smckusickstatic void
521186197Sattiliort_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
522186197Sattilio{
523140715Sjeff#ifdef IF_RT_PHY_SUPPORT
524122523Skan	struct rt_softc *sc;
525172930Srwatson	struct mii_data *mii;
526122523Skan
527122965Skan	sc = ifp->if_softc;
528122523Skan
529122523Skan	RT_SOFTC_LOCK(sc);
530157322Sjeff	mii = device_get_softc(sc->rt_miibus);
531122523Skan	mii_pollstat(mii);
532122523Skan	ifmr->ifm_active = mii->mii_media_active;
533183188Sobrien	ifmr->ifm_status = mii->mii_media_status;
534230725Smckusick	ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
535122523Skan	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
53699264Smux	RT_SOFTC_UNLOCK(sc);
537220937Sjh#else /* !IF_RT_PHY_SUPPORT */
538152735Srodrigc
539152735Srodrigc	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
54099264Smux	ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
541183188Sobrien#endif /* IF_RT_PHY_SUPPORT */
542220937Sjh}
543152735Srodrigc
544152332Srodrigcstatic int
54599264Smuxrt_detach(device_t dev)
54699264Smux{
54799264Smux	struct rt_softc *sc;
54899264Smux	struct ifnet *ifp;
549167551Spjd	int i;
550152735Srodrigc
551152735Srodrigc	sc = device_get_softc(dev);
55299264Smux	ifp = sc->ifp;
55399264Smux
55499264Smux	RT_DPRINTF(sc, RT_DEBUG_ANY, "detaching\n");
55599264Smux
55699264Smux	RT_SOFTC_LOCK(sc);
55799264Smux
55899264Smux	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
55999264Smux
56099264Smux	callout_stop(&sc->periodic_ch);
561152735Srodrigc	callout_stop(&sc->tx_watchdog_ch);
562152735Srodrigc
563122523Skan	taskqueue_drain(sc->taskqueue, &sc->rx_done_task);
56499264Smux	taskqueue_drain(sc->taskqueue, &sc->tx_done_task);
56599264Smux	taskqueue_drain(sc->taskqueue, &sc->periodic_task);
56699264Smux
56799264Smux	/* free Tx and Rx rings */
56899264Smux	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
569152735Srodrigc		rt_free_tx_ring(sc, &sc->tx_ring[i]);
570152735Srodrigc
571122523Skan	rt_free_rx_ring(sc, &sc->rx_ring);
57299264Smux
57399264Smux	RT_SOFTC_UNLOCK(sc);
57499264Smux
575152561Srodrigc#ifdef IF_RT_PHY_SUPPORT
576152561Srodrigc	if (sc->rt_miibus != NULL)
577152561Srodrigc		device_delete_child(dev, sc->rt_miibus);
578152561Srodrigc#endif
579167551Spjd
580180484Srodrigc	ether_ifdetach(ifp);
581180484Srodrigc	if_free(ifp);
582156685Sru
583180484Srodrigc	taskqueue_free(sc->taskqueue);
584180484Srodrigc
585156685Sru	mtx_destroy(&sc->lock);
586156685Sru
587182025Srodrigc	bus_generic_detach(dev);
588156685Sru	bus_teardown_intr(dev, sc->irq, sc->irqh);
589182025Srodrigc	bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
590182025Srodrigc	bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem);
591182025Srodrigc
592182025Srodrigc	return (0);
593182025Srodrigc}
594182025Srodrigc
595156685Srustatic int
596156685Srurt_shutdown(device_t dev)
597156685Sru{
598156685Sru	struct rt_softc *sc;
599156685Sru
600156685Sru	sc = device_get_softc(dev);
601171852Sjhb	RT_DPRINTF(sc, RT_DEBUG_ANY, "shutting down\n");
602171852Sjhb	rt_stop(sc);
603171852Sjhb
604171852Sjhb	return (0);
605156685Sru}
606156685Sru
607171852Sjhbstatic int
608171852Sjhbrt_suspend(device_t dev)
609171852Sjhb{
610171852Sjhb	struct rt_softc *sc;
611156685Sru
612156685Sru	sc = device_get_softc(dev);
613171852Sjhb	RT_DPRINTF(sc, RT_DEBUG_ANY, "suspending\n");
614171852Sjhb	rt_stop(sc);
615171852Sjhb
616171852Sjhb	return (0);
617156685Sru}
618156685Sru
619171852Sjhbstatic int
620171852Sjhbrt_resume(device_t dev)
621171852Sjhb{
622171852Sjhb	struct rt_softc *sc;
623156685Sru	struct ifnet *ifp;
624156685Sru
625171852Sjhb	sc = device_get_softc(dev);
626171852Sjhb	ifp = sc->ifp;
627171852Sjhb
628171852Sjhb	RT_DPRINTF(sc, RT_DEBUG_ANY, "resuming\n");
629156685Sru
630156685Sru	if (ifp->if_flags & IFF_UP)
631171852Sjhb		rt_init(sc);
632171852Sjhb
633171852Sjhb	return (0);
634171852Sjhb}
635220937Sjh
636156685Sru/*
637220937Sjh * rt_init_locked - Run initialization process having locked mtx.
638158546Srodrigc */
639174282Srodrigcstatic void
640156685Srurt_init_locked(void *priv)
641174282Srodrigc{
642174282Srodrigc	struct rt_softc *sc;
643174282Srodrigc	struct ifnet *ifp;
644174282Srodrigc#ifdef IF_RT_PHY_SUPPORT
645174282Srodrigc	struct mii_data *mii;
646156685Sru#endif
647156685Sru	int i, ntries;
648156685Sru	uint32_t tmp;
649156685Sru
650156685Sru	sc = priv;
651156685Sru	ifp = sc->ifp;
652156685Sru#ifdef IF_RT_PHY_SUPPORT
653152561Srodrigc	mii = device_get_softc(sc->rt_miibus);
654152561Srodrigc#endif
65599264Smux
65699264Smux	RT_DPRINTF(sc, RT_DEBUG_ANY, "initializing\n");
65799264Smux
65899264Smux	RT_SOFTC_ASSERT_LOCKED(sc);
65999264Smux
66099264Smux	/* hardware reset */
661122523Skan	RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);
66299264Smux	rt305x_sysctl_set(SYSCTL_RSTCTRL, SYSCTL_RSTCTRL_FRENG);
66399264Smux
664218852Sjh	/* Fwd to CPU (uni|broad|multi)cast and Unknown */
665122523Skan	RT_WRITE(sc, GDMA1_BASE + GDMA_FWD_CFG,
666152735Srodrigc	    (
667152735Srodrigc	    GDM_ICS_EN | /* Enable IP Csum */
668152735Srodrigc	    GDM_TCS_EN | /* Enable TCP Csum */
669152735Srodrigc	    GDM_UCS_EN | /* Enable UDP Csum */
670161584Smarius	    GDM_STRPCRC | /* Strip CRC from packet */
671161584Smarius	    GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* Forward UCast to CPU */
672152735Srodrigc	    GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* Forward BCast to CPU */
673152735Srodrigc	    GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* Forward MCast to CPU */
674161584Smarius	    GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* Forward Other to CPU */
675152735Srodrigc	    ));
676161584Smarius
677167551Spjd	/* disable DMA engine */
678152217Srodrigc	RT_WRITE(sc, PDMA_BASE + PDMA_GLO_CFG, 0);
679152735Srodrigc	RT_WRITE(sc, PDMA_BASE + PDMA_RST_IDX, 0xffffffff);
680218852Sjh
681122523Skan	/* wait while DMA engine is busy */
68299264Smux	for (ntries = 0; ntries < 100; ntries++) {
68399264Smux		tmp = RT_READ(sc, PDMA_BASE + PDMA_GLO_CFG);
68499264Smux		if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)))
68599264Smux			break;
68699264Smux		DELAY(1000);
68799264Smux	}
68899264Smux
68999264Smux	if (ntries == 100) {
69099264Smux		device_printf(sc->dev, "timeout waiting for DMA engine\n");
69199264Smux		goto fail;
69299264Smux	}
69399264Smux
69499264Smux	/* reset Rx and Tx rings */
69599264Smux	tmp = FE_RST_DRX_IDX0 |
69699264Smux		FE_RST_DTX_IDX3 |
69799264Smux		FE_RST_DTX_IDX2 |
698225617Skmacy		FE_RST_DTX_IDX1 |
69999264Smux		FE_RST_DTX_IDX0;
70099264Smux
701107850Salfred	RT_WRITE(sc, PDMA_BASE + PDMA_RST_IDX, tmp);
702107850Salfred
703107850Salfred	/* XXX switch set mac address */
704107850Salfred	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
70599264Smux		rt_reset_tx_ring(sc, &sc->tx_ring[i]);
70699264Smux
70799264Smux	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
708138509Sphk		/* update TX_BASE_PTRx */
709138461Sphk		RT_WRITE(sc, PDMA_BASE + TX_BASE_PTR(i),
710230725Smckusick			sc->tx_ring[i].desc_phys_addr);
71199264Smux		RT_WRITE(sc, PDMA_BASE + TX_MAX_CNT(i),
71299264Smux			RT_SOFTC_TX_RING_DESC_COUNT);
713230725Smckusick		RT_WRITE(sc, PDMA_BASE + TX_CTX_IDX(i), 0);
714244135Skib	}
715230725Smckusick
716230725Smckusick	/* init Rx ring */
717230725Smckusick	rt_reset_rx_ring(sc, &sc->rx_ring);
718230725Smckusick
719159274Srwatson	/* update RX_BASE_PTR0 */
720230725Smckusick	RT_WRITE(sc, PDMA_BASE + RX_BASE_PTR0,
721230725Smckusick		sc->rx_ring.desc_phys_addr);
722173064Srodrigc	RT_WRITE(sc, PDMA_BASE + RX_MAX_CNT0,
723173064Srodrigc		RT_SOFTC_RX_RING_DATA_COUNT);
724173064Srodrigc	RT_WRITE(sc, PDMA_BASE + RX_CALC_IDX0,
725173064Srodrigc		RT_SOFTC_RX_RING_DATA_COUNT - 1);
726215747Spluknet
727215747Spluknet	/* write back DDONE, 16byte burst enable RX/TX DMA */
728173064Srodrigc	RT_WRITE(sc, PDMA_BASE + PDMA_GLO_CFG,
729230725Smckusick	    FE_TX_WB_DDONE | FE_DMA_BT_SIZE16 | FE_RX_DMA_EN | FE_TX_DMA_EN);
730138357Sphk
731111119Simp	/* disable interrupts mitigation */
732107849Salfred	RT_WRITE(sc, PDMA_BASE + DELAY_INT_CFG, 0);
733159982Sjhb
734159982Sjhb	/* clear pending interrupts */
735159982Sjhb	RT_WRITE(sc, GE_PORT_BASE + FE_INT_STATUS, 0xffffffff);
736138087Sphk
737159982Sjhb	/* enable interrupts */
738195104Srwatson	tmp = 	CNT_PPE_AF |
739159982Sjhb		CNT_GDM_AF |
740159982Sjhb		PSE_P2_FC |
74199264Smux		GDM_CRC_DROP |
742159982Sjhb		PSE_BUF_DROP |
743159982Sjhb		GDM_OTHER_DROP |
744138509Sphk		PSE_P1_FC |
745159982Sjhb		PSE_P0_FC |
746159982Sjhb		PSE_FQ_EMPTY |
747159982Sjhb		INT_TX_COHERENT |
748138509Sphk		INT_RX_COHERENT |
749159982Sjhb		INT_TXQ3_DONE |
750138509Sphk		INT_TXQ2_DONE |
751266024Sbdrewery		INT_TXQ1_DONE |
752138509Sphk		INT_TXQ0_DONE |
753230725Smckusick		INT_RX_DONE;
754230725Smckusick
755230725Smckusick	sc->intr_enable_mask = tmp;
756138509Sphk
757230725Smckusick	RT_WRITE(sc, GE_PORT_BASE + FE_INT_ENABLE, tmp);
758159982Sjhb
75999264Smux	if (rt_txrx_enable(sc) != 0)
76099264Smux		goto fail;
76199264Smux
762212341Spjd#ifdef IF_RT_PHY_SUPPORT
763212341Spjd	if (mii) mii_mediachg(mii);
764212341Spjd#endif /* IF_RT_PHY_SUPPORT */
765212341Spjd
766212341Spjd	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
767218852Sjh	ifp->if_drv_flags |= IFF_DRV_RUNNING;
768218852Sjh
769224655Smm	sc->periodic_round = 0;
770218852Sjh
771230725Smckusick	callout_reset(&sc->periodic_ch, hz / 10, rt_periodic, sc);
772218852Sjh
773212341Spjd	return;
774212341Spjd
775212341Spjdfail:
776212341Spjd	rt_stop_locked(sc);
777212341Spjd}
778212341Spjd
/*
 * rt_init - lock and initialize device.
 */
static void
rt_init(void *priv)
{
	struct rt_softc *sc;

	sc = priv;
	RT_SOFTC_LOCK(sc);
	rt_init_locked(sc);
	RT_SOFTC_UNLOCK(sc);
}
792212341Spjd
793212341Spjd/*
794212341Spjd * rt_stop_locked - stop TX/RX w/ lock
795212341Spjd */
796212341Spjdstatic void
797212341Spjdrt_stop_locked(void *priv)
798212341Spjd{
799212341Spjd	struct rt_softc *sc;
800212341Spjd	struct ifnet *ifp;
801212341Spjd
802212341Spjd	sc = priv;
803212341Spjd	ifp = sc->ifp;
804212341Spjd
805212341Spjd	RT_DPRINTF(sc, RT_DEBUG_ANY, "stopping\n");
806212341Spjd
807212341Spjd	RT_SOFTC_ASSERT_LOCKED(sc);
808212341Spjd	sc->tx_timer = 0;
809212341Spjd	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
810212341Spjd	callout_stop(&sc->periodic_ch);
811212341Spjd	callout_stop(&sc->tx_watchdog_ch);
812218852Sjh	RT_SOFTC_UNLOCK(sc);
813212341Spjd	taskqueue_block(sc->taskqueue);
814212341Spjd
815212341Spjd	/*
816212341Spjd	 * Sometime rt_stop_locked called from isr and we get panic
817212341Spjd	 * When found, I fix it
818212341Spjd	 */
819212341Spjd#ifdef notyet
820212341Spjd	taskqueue_drain(sc->taskqueue, &sc->rx_done_task);
821217792Sjh	taskqueue_drain(sc->taskqueue, &sc->tx_done_task);
822212341Spjd	taskqueue_drain(sc->taskqueue, &sc->periodic_task);
823212341Spjd#endif
824212341Spjd	RT_SOFTC_LOCK(sc);
825212356Spjd
826212356Spjd	/* disable interrupts */
827212356Spjd	RT_WRITE(sc, GE_PORT_BASE + FE_INT_ENABLE, 0);
828212341Spjd
829212341Spjd	/* reset adapter */
830212341Spjd	RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET);
831212341Spjd
832212341Spjd	RT_WRITE(sc, GDMA1_BASE + GDMA_FWD_CFG,
833212341Spjd	    (
834212341Spjd	    GDM_ICS_EN | /* Enable IP Csum */
835218852Sjh	    GDM_TCS_EN | /* Enable TCP Csum */
836212341Spjd	    GDM_UCS_EN | /* Enable UDP Csum */
837212341Spjd	    GDM_STRPCRC | /* Strip CRC from packet */
838212341Spjd	    GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* Forward UCast to CPU */
839212341Spjd	    GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* Forward BCast to CPU */
840212341Spjd	    GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* Forward MCast to CPU */
841212341Spjd	    GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT   /* Forward Other to CPU */
842212341Spjd	    ));
843212341Spjd}
844233027Skib
845233027Skibstatic void
846212341Spjdrt_stop(void *priv)
847212341Spjd{
848212341Spjd	struct rt_softc *sc;
849212341Spjd
850212341Spjd	sc = priv;
851212341Spjd	RT_SOFTC_LOCK(sc);
852212341Spjd	rt_stop_locked(sc);
853212341Spjd	RT_SOFTC_UNLOCK(sc);
854212341Spjd}
855212341Spjd
856212341Spjd/*
857212341Spjd * rt_tx_data - transmit packet.
858212341Spjd */
859212341Spjdstatic int
860212341Spjdrt_tx_data(struct rt_softc *sc, struct mbuf *m, int qid)
861212341Spjd{
862212341Spjd	struct ifnet *ifp;
863212341Spjd	struct rt_softc_tx_ring *ring;
864212341Spjd	struct rt_softc_tx_data *data;
865212341Spjd	struct rt_txdesc *desc;
866212341Spjd	struct mbuf *m_d;
867212341Spjd	bus_dma_segment_t dma_seg[RT_SOFTC_MAX_SCATTER];
868212341Spjd	int error, ndmasegs, ndescs, i;
869212341Spjd
870212341Spjd	KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT,
871212341Spjd		("%s: Tx data: invalid qid=%d\n",
872212341Spjd		 device_get_nameunit(sc->dev), qid));
873212341Spjd
87499264Smux	RT_SOFTC_TX_RING_ASSERT_LOCKED(&sc->tx_ring[qid]);
875212341Spjd
876212341Spjd	ifp = sc->ifp;
877212341Spjd	ring = &sc->tx_ring[qid];
878212341Spjd	desc = &ring->desc[ring->desc_cur];
879218852Sjh	data = &ring->data[ring->data_cur];
880218852Sjh
881230725Smckusick	error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag, data->dma_map, m,
882218852Sjh	    dma_seg, &ndmasegs, 0);
883212341Spjd	if (error != 0)	{
884212341Spjd		/* too many fragments, linearize */
885212341Spjd
886212341Spjd		RT_DPRINTF(sc, RT_DEBUG_TX,
887212341Spjd			"could not load mbuf DMA map, trying to linearize "
888230725Smckusick			"mbuf: ndmasegs=%d, len=%d, error=%d\n",
889230725Smckusick			ndmasegs, m->m_pkthdr.len, error);
890212341Spjd
891212341Spjd		m_d = m_collapse(m, M_NOWAIT, 16);
892212341Spjd		if (m_d == NULL) {
893212341Spjd			m_freem(m);
894212341Spjd			m = NULL;
895212341Spjd			return (ENOMEM);
896212341Spjd		}
897212341Spjd		m = m_d;
898212341Spjd
899212341Spjd		sc->tx_defrag_packets++;
900212341Spjd
901212341Spjd		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
902212341Spjd		    data->dma_map, m, dma_seg, &ndmasegs, 0);
903212341Spjd		if (error != 0)	{
904212341Spjd			device_printf(sc->dev, "could not load mbuf DMA map: "
905212341Spjd			    "ndmasegs=%d, len=%d, error=%d\n",
906212341Spjd			    ndmasegs, m->m_pkthdr.len, error);
907212341Spjd			m_freem(m);
908212341Spjd			return (error);
909212341Spjd		}
910212341Spjd	}
911212341Spjd
912212341Spjd	if (m->m_pkthdr.len == 0)
913212341Spjd		ndmasegs = 0;
914212341Spjd
915212341Spjd	/* determine how many Tx descs are required */
916212341Spjd	ndescs = 1 + ndmasegs / 2;
917212341Spjd	if ((ring->desc_queued + ndescs) >
918212341Spjd	    (RT_SOFTC_TX_RING_DESC_COUNT - 2)) {
919212341Spjd		RT_DPRINTF(sc, RT_DEBUG_TX,
920212341Spjd		    "there are not enough Tx descs\n");
921212341Spjd
922212341Spjd		sc->no_tx_desc_avail++;
923212341Spjd
924212341Spjd		bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
925212341Spjd		m_freem(m);
926212341Spjd		return (EFBIG);
927212341Spjd	}
928212341Spjd
929212341Spjd	data->m = m;
930212341Spjd
931212341Spjd	/* set up Tx descs */
932212341Spjd	for (i = 0; i < ndmasegs; i += 2) {
933212341Spjd		/* Set destenation */
934212341Spjd		desc->dst = (TXDSCR_DST_PORT_GDMA1);
935212341Spjd		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
936212341Spjd			desc->dst |= (TXDSCR_IP_CSUM_GEN|TXDSCR_UDP_CSUM_GEN|
937212341Spjd			    TXDSCR_TCP_CSUM_GEN);
938212341Spjd		/* Set queue id */
939212341Spjd		desc->qn = qid;
940218852Sjh		/* No PPPoE */
941212341Spjd		desc->pppoe = 0;
942212341Spjd		/* No VLAN */
943212341Spjd		desc->vid = 0;
944212341Spjd
945212341Spjd		desc->sdp0 = htole32(dma_seg[i].ds_addr);
946212341Spjd		desc->sdl0 = htole16(dma_seg[i].ds_len |
947212341Spjd		    ( ((i+1) == ndmasegs )?RT_TXDESC_SDL0_LASTSEG:0 ));
948217792Sjh
949212341Spjd		if ((i+1) < ndmasegs) {
950218852Sjh			desc->sdp1 = htole32(dma_seg[i+1].ds_addr);
951212341Spjd			desc->sdl1 = htole16(dma_seg[i+1].ds_len |
952212341Spjd			    ( ((i+2) == ndmasegs )?RT_TXDESC_SDL1_LASTSEG:0 ));
953212341Spjd		} else {
954212341Spjd			desc->sdp1 = 0;
955218852Sjh			desc->sdl1 = 0;
956212341Spjd		}
957212341Spjd
958212341Spjd		if ((i+2) < ndmasegs) {
959212341Spjd			ring->desc_queued++;
960212341Spjd			ring->desc_cur = (ring->desc_cur + 1) %
961212341Spjd			    RT_SOFTC_TX_RING_DESC_COUNT;
962212341Spjd		}
963212341Spjd		desc = &ring->desc[ring->desc_cur];
964212341Spjd	}
965212341Spjd
966212341Spjd	RT_DPRINTF(sc, RT_DEBUG_TX, "sending data: len=%d, ndmasegs=%d, "
967218852Sjh	    "DMA ds_len=%d/%d/%d/%d/%d\n",
968212341Spjd	    m->m_pkthdr.len, ndmasegs,
969212341Spjd	    (int) dma_seg[0].ds_len,
970212341Spjd	    (int) dma_seg[1].ds_len,
971212341Spjd	    (int) dma_seg[2].ds_len,
972212341Spjd	    (int) dma_seg[3].ds_len,
973212341Spjd	    (int) dma_seg[4].ds_len);
974212341Spjd
975212341Spjd	bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
976212341Spjd		BUS_DMASYNC_PREWRITE);
977212341Spjd	bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
978212341Spjd		BUS_DMASYNC_PREWRITE);
979212341Spjd	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
980212341Spjd		BUS_DMASYNC_PREWRITE);
981212341Spjd
982212341Spjd	ring->desc_queued++;
983212341Spjd	ring->desc_cur = (ring->desc_cur + 1) % RT_SOFTC_TX_RING_DESC_COUNT;
984212341Spjd
985233027Skib	ring->data_queued++;
986233027Skib	ring->data_cur = (ring->data_cur + 1) % RT_SOFTC_TX_RING_DATA_COUNT;
987212341Spjd
988212341Spjd	/* kick Tx */
989212341Spjd	RT_WRITE(sc, PDMA_BASE + TX_CTX_IDX(qid), ring->desc_cur);
990212341Spjd
991212341Spjd	return (0);
992212341Spjd}
993212341Spjd
994212341Spjd/*
995212341Spjd * rt_start - start Transmit/Receive
996212341Spjd */
997212341Spjdstatic void
998218852Sjhrt_start(struct ifnet *ifp)
999212341Spjd{
1000212341Spjd	struct rt_softc *sc;
1001212341Spjd	struct mbuf *m;
1002212341Spjd	int qid = 0 /* XXX must check QoS priority */;
1003212341Spjd
1004212341Spjd	sc = ifp->if_softc;
1005212341Spjd
1006212466Skib	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1007212466Skib		return;
1008212466Skib
1009212466Skib	for (;;) {
1010212341Spjd		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
1011212341Spjd		if (m == NULL)
1012212341Spjd			break;
1013212341Spjd
1014212341Spjd		m->m_pkthdr.rcvif = NULL;
1015212341Spjd
1016218852Sjh		RT_SOFTC_TX_RING_LOCK(&sc->tx_ring[qid]);
1017212341Spjd
1018212341Spjd		if (sc->tx_ring[qid].data_queued >=
1019212341Spjd		    RT_SOFTC_TX_RING_DATA_COUNT) {
1020122523Skan			RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);
1021122523Skan
1022122523Skan			RT_DPRINTF(sc, RT_DEBUG_TX,
1023122523Skan			    "if_start: Tx ring with qid=%d is full\n", qid);
1024218852Sjh
1025218852Sjh			m_freem(m);
1026218852Sjh
1027230725Smckusick			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1028218852Sjh			ifp->if_oerrors++;
1029122640Skan
1030122523Skan			sc->tx_data_queue_full[qid]++;
103199264Smux
103299264Smux			break;
1033212341Spjd		}
1034231692Smm
1035212341Spjd		if (rt_tx_data(sc, m, qid) != 0) {
103699264Smux			RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);
103799264Smux
103899264Smux			ifp->if_oerrors++;
103999264Smux
104099264Smux			break;
104199264Smux		}
104299264Smux
104399264Smux		RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]);
104499264Smux		sc->tx_timer = RT_TX_WATCHDOG_TIMEOUT;
1045167553Spjd		callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc);
1046164033Srwatson	}
104799264Smux}
104899264Smux
1049127473Spjd/*
105099264Smux * rt_update_promisc - set/clear promiscuous mode. Unused yet, because
1051127473Spjd * filtering done by attached Ethernet switch.
105299264Smux */
1053164033Srwatsonstatic void
1054164033Srwatsonrt_update_promisc(struct ifnet *ifp)
1055164033Srwatson{
105699264Smux	struct rt_softc *sc;
105799264Smux
1058164033Srwatson	sc = ifp->if_softc;
1059164033Srwatson	printf("%s: %s promiscuous mode\n",
1060164033Srwatson		device_get_nameunit(sc->dev),
1061164033Srwatson		(ifp->if_flags & IFF_PROMISC) ? "entering" : "leaving");
1062164033Srwatson}
106399264Smux
1064164033Srwatson/*
106599264Smux * rt_ioctl - ioctl handler.
1066164033Srwatson */
1067164033Srwatsonstatic int
1068164033Srwatsonrt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1069164033Srwatson{
1070155902Sjeff	struct rt_softc *sc;
1071155902Sjeff	struct ifreq *ifr;
1072155902Sjeff#ifdef IF_RT_PHY_SUPPORT
1073155902Sjeff	struct mii_data *mii;
1074155902Sjeff#endif /* IF_RT_PHY_SUPPORT */
1075155902Sjeff	int error, startall;
1076155902Sjeff
1077155902Sjeff	sc = ifp->if_softc;
1078155902Sjeff	ifr = (struct ifreq *) data;
1079155902Sjeff
1080155902Sjeff	error = 0;
1081168396Spjd
1082168396Spjd	switch (cmd) {
1083155902Sjeff	case SIOCSIFFLAGS:
1084212341Spjd		startall = 0;
108599264Smux		RT_SOFTC_LOCK(sc);
1086212341Spjd		if (ifp->if_flags & IFF_UP) {
108799264Smux			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1088212341Spjd				if ((ifp->if_flags ^ sc->if_flags) &
1089212341Spjd				    IFF_PROMISC)
1090212341Spjd					rt_update_promisc(ifp);
1091212341Spjd			} else {
109299264Smux				rt_init_locked(sc);
1093212341Spjd				startall = 1;
1094212341Spjd			}
109599264Smux		} else {
109699264Smux			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1097224655Smm				rt_stop_locked(sc);
1098231692Smm		}
1099231692Smm		sc->if_flags = ifp->if_flags;
1100231692Smm		RT_SOFTC_UNLOCK(sc);
1101231692Smm		break;
1102231692Smm	case SIOCGIFMEDIA:
1103231692Smm	case SIOCSIFMEDIA:
1104231692Smm#ifdef IF_RT_PHY_SUPPORT
1105231692Smm		mii = device_get_softc(sc->rt_miibus);
1106231692Smm		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1107231692Smm#else
1108218852Sjh		error = ifmedia_ioctl(ifp, ifr, &sc->rt_ifmedia, cmd);
1109212341Spjd#endif /* IF_RT_PHY_SUPPORT */
111099264Smux		break;
1111224712Smm	default:
1112224712Smm		error = ether_ioctl(ifp, cmd, data);
1113224712Smm		break;
111499264Smux	}
111599264Smux	return (error);
111699264Smux}
111799264Smux
111899264Smux/*
111999264Smux * rt_periodic - Handler of PERIODIC interrupt
1120167232Srwatson */
1121167232Srwatsonstatic void
112299264Smuxrt_periodic(void *arg)
112399264Smux{
112499264Smux	struct rt_softc *sc;
112599264Smux
112699264Smux	sc = arg;
112799264Smux	RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic\n");
112899264Smux	taskqueue_enqueue(sc->taskqueue, &sc->periodic_task);
112999264Smux}
113099264Smux
1131225617Skmacy/*
113299264Smux * rt_tx_watchdog - Handler of TX Watchdog
113399264Smux */
1134107850Salfredstatic void
1135107850Salfredrt_tx_watchdog(void *arg)
113699264Smux{
113799264Smux	struct rt_softc *sc;
1138231692Smm	struct ifnet *ifp;
113999264Smux
1140224655Smm	sc = arg;
1141231692Smm	ifp = sc->ifp;
114299264Smux
1143195247Srwatson	if (sc->tx_timer == 0)
1144167672Spjd		return;
1145164033Srwatson
1146164033Srwatson	if (--sc->tx_timer == 0) {
1147125340Spjd		device_printf(sc->dev, "Tx watchdog timeout: resetting\n");
1148125340Spjd#ifdef notyet
1149125340Spjd		/*
1150117132Siedowse		 * XXX: Commented out, because reset break input.
1151117132Siedowse		 */
1152117132Siedowse		rt_stop_locked(sc);
1153117132Siedowse		rt_init_locked(sc);
115499264Smux#endif
1155117132Siedowse		ifp->if_oerrors++;
1156159982Sjhb		sc->tx_watchdog_timeouts++;
1157117132Siedowse	}
1158195247Srwatson	callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc);
1159117132Siedowse}
1160117132Siedowse
1161159982Sjhb/*
1162117132Siedowse * rt_cnt_ppe_af - Handler of PPE Counter Table Almost Full interrupt
1163117132Siedowse */
1164117132Siedowsestatic void
116599264Smuxrt_cnt_ppe_af(struct rt_softc *sc)
1166117132Siedowse{
1167127476Spjd
1168117132Siedowse	RT_DPRINTF(sc, RT_DEBUG_INTR, "PPE Counter Table Almost Full\n");
1169117132Siedowse}
1170117132Siedowse
1171127476Spjd/*
1172117132Siedowse * rt_cnt_gdm_af - Handler of GDMA 1 & 2 Counter Table Almost Full interrupt
1173117132Siedowse */
1174231692Smmstatic void
1175231692Smmrt_cnt_gdm_af(struct rt_softc *sc)
1176231692Smm{
1177231692Smm
1178231692Smm	RT_DPRINTF(sc, RT_DEBUG_INTR,
1179231692Smm	    "GDMA 1 & 2 Counter Table Almost Full\n");
1180231692Smm}
1181231692Smm
1182231692Smm/*
1183231692Smm * rt_pse_p2_fc - Handler of PSE port2 (GDMA 2) flow control interrupt
1184231692Smm */
1185231692Smmstatic void
1186231692Smmrt_pse_p2_fc(struct rt_softc *sc)
1187231692Smm{
1188231692Smm
1189117132Siedowse	RT_DPRINTF(sc, RT_DEBUG_INTR,
1190127476Spjd	    "PSE port2 (GDMA 2) flow control asserted.\n");
1191117132Siedowse}
1192117132Siedowse
1193127476Spjd/*
1194117132Siedowse * rt_gdm_crc_drop - Handler of GDMA 1/2 discard a packet due to CRC error
1195117132Siedowse * interrupt
1196117132Siedowse */
1197119885Siedowsestatic void
1198119885Siedowsert_gdm_crc_drop(struct rt_softc *sc)
1199119885Siedowse{
1200119885Siedowse
1201119885Siedowse	RT_DPRINTF(sc, RT_DEBUG_INTR,
1202119885Siedowse	    "GDMA 1 & 2 discard a packet due to CRC error\n");
1203119885Siedowse}
1204159982Sjhb
1205119885Siedowse/*
1206119885Siedowse * rt_pse_buf_drop - Handler of buffer sharing limitation interrupt
1207117132Siedowse */
120899264Smuxstatic void
120999264Smuxrt_pse_buf_drop(struct rt_softc *sc)
121099264Smux{
1211159982Sjhb
1212159982Sjhb	RT_DPRINTF(sc, RT_DEBUG_INTR,
121399264Smux	    "PSE discards a packet due to buffer sharing limitation\n");
1214159982Sjhb}
1215138087Sphk
1216138087Sphk/*
1217138087Sphk * rt_gdm_other_drop - Handler of discard on other reason interrupt
121899264Smux */
121999264Smuxstatic void
122099264Smuxrt_gdm_other_drop(struct rt_softc *sc)
122199264Smux{
122299264Smux
122399264Smux	RT_DPRINTF(sc, RT_DEBUG_INTR,
122499264Smux	    "GDMA 1 & 2 discard a packet due to other reason\n");
122599264Smux}
122699264Smux
122799264Smux/*
122899264Smux * rt_pse_p1_fc - Handler of PSE port1 (GDMA 1) flow control interrupt
122999264Smux */
123099264Smuxstatic void
1231230725Smckusickrt_pse_p1_fc(struct rt_softc *sc)
1232162983Skib{
123399264Smux
1234138087Sphk	RT_DPRINTF(sc, RT_DEBUG_INTR,
1235138087Sphk	    "PSE port1 (GDMA 1) flow control asserted.\n");
1236162407Skib}
1237162983Skib
1238162407Skib/*
1239162407Skib * rt_pse_p0_fc - Handler of PSE port0 (CDMA) flow control interrupt
1240175202Sattilio */
1241162407Skibstatic void
1242162407Skibrt_pse_p0_fc(struct rt_softc *sc)
1243162407Skib{
1244162407Skib
1245162407Skib	RT_DPRINTF(sc, RT_DEBUG_INTR,
1246162983Skib	    "PSE port0 (CDMA) flow control asserted.\n");
1247162983Skib}
1248175294Sattilio
1249162407Skib/*
1250162407Skib * rt_pse_fq_empty - Handler of PSE free Q empty threshold reached interrupt
1251162407Skib */
1252162407Skibstatic void
1253162407Skibrt_pse_fq_empty(struct rt_softc *sc)
1254162407Skib{
1255162407Skib
1256162407Skib	RT_DPRINTF(sc, RT_DEBUG_INTR,
1257162407Skib	    "PSE free Q empty threshold reached & forced drop "
1258162444Skib		    "condition occurred.\n");
1259175294Sattilio}
1260162407Skib
1261162407Skib/*
1262162407Skib * rt_intr - main ISR
1263249053Skib */
1264140715Sjeffstatic void
1265244652Skibrt_intr(void *arg)
1266244652Skib{
1267140715Sjeff	struct rt_softc *sc;
1268155902Sjeff	struct ifnet *ifp;
1269175294Sattilio	uint32_t status;
1270249053Skib
127199264Smux	sc = arg;
127299264Smux	ifp = sc->ifp;
1273172151Skib
127499264Smux	/* acknowledge interrupts */
127599264Smux	status = RT_READ(sc, GE_PORT_BASE + FE_INT_STATUS);
127699264Smux	RT_WRITE(sc, GE_PORT_BASE + FE_INT_STATUS, status);
1277184554Sattilio
1278184554Sattilio	RT_DPRINTF(sc, RT_DEBUG_INTR, "interrupt: status=0x%08x\n", status);
1279184554Sattilio
1280184554Sattilio	if (status == 0xffffffff ||	/* device likely went away */
1281184554Sattilio		status == 0)		/* not for us */
128299264Smux		return;
1283184554Sattilio
1284184554Sattilio	sc->interrupts++;
1285184554Sattilio
1286184554Sattilio	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1287184554Sattilio		return;
1288184554Sattilio
1289184554Sattilio	if (status & CNT_PPE_AF)
129099264Smux		rt_cnt_ppe_af(sc);
129199264Smux
129299264Smux	if (status & CNT_GDM_AF)
129399264Smux		rt_cnt_gdm_af(sc);
129499264Smux
1295162647Stegge	if (status & PSE_P2_FC)
129699264Smux		rt_pse_p2_fc(sc);
1297127476Spjd
1298162649Stegge	if (status & GDM_CRC_DROP)
1299162647Stegge		rt_gdm_crc_drop(sc);
130099264Smux
1301212466Skib	if (status & PSE_BUF_DROP)
1302123075Siedowse		rt_pse_buf_drop(sc);
1303123075Siedowse
1304123075Siedowse	if (status & GDM_OTHER_DROP)
1305123075Siedowse		rt_gdm_other_drop(sc);
1306123075Siedowse
1307144055Sjeff	if (status & PSE_P1_FC)
1308191990Sattilio		rt_pse_p1_fc(sc);
130999264Smux
1310138835Sphk	if (status & PSE_P0_FC)
131199264Smux		rt_pse_p0_fc(sc);
131299264Smux
131399264Smux	if (status & PSE_FQ_EMPTY)
131499264Smux		rt_pse_fq_empty(sc);
131599264Smux
131699264Smux	if (status & INT_TX_COHERENT)
131799264Smux		rt_tx_coherent_intr(sc);
1318191990Sattilio
1319191990Sattilio	if (status & INT_RX_COHERENT)
132099264Smux		rt_rx_coherent_intr(sc);
1321174937Simp
1322174937Simp	if (status & RX_DLY_INT)
1323174937Simp		rt_rx_delay_intr(sc);
1324174937Simp
1325174937Simp	if (status & TX_DLY_INT)
1326174937Simp		rt_tx_delay_intr(sc);
1327174937Simp
1328144055Sjeff	if (status & INT_RX_DONE)
1329191990Sattilio		rt_rx_intr(sc);
133099264Smux
1331138835Sphk	if (status & INT_TXQ3_DONE)
133299264Smux		rt_tx_intr(sc, 3);
133399264Smux
133499264Smux	if (status & INT_TXQ2_DONE)
133599264Smux		rt_tx_intr(sc, 2);
133699264Smux
133799264Smux	if (status & INT_TXQ1_DONE)
1338172151Skib		rt_tx_intr(sc, 1);
1339172151Skib
1340212466Skib	if (status & INT_TXQ0_DONE)
1341172151Skib		rt_tx_intr(sc, 0);
1342211930Spjd}
1343172151Skib
1344172151Skibstatic void
134599264Smuxrt_tx_coherent_intr(struct rt_softc *sc)
134699264Smux{
1347233027Skib	uint32_t tmp;
1348233027Skib	int i;
1349162649Stegge
1350184554Sattilio	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx coherent interrupt\n");
1351184554Sattilio
135299264Smux	sc->tx_coherent_interrupts++;
1353184554Sattilio
1354140715Sjeff	/* restart DMA engine */
1355155902Sjeff	tmp = RT_READ(sc, PDMA_BASE + PDMA_GLO_CFG);
1356175294Sattilio	tmp &= ~(FE_TX_WB_DDONE | FE_TX_DMA_EN);
135799264Smux	RT_WRITE(sc, PDMA_BASE + PDMA_GLO_CFG, tmp);
135899264Smux
135999264Smux	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++)
136099264Smux		rt_reset_tx_ring(sc, &sc->tx_ring[i]);
1361155902Sjeff
1362155902Sjeff	for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) {
136399264Smux		RT_WRITE(sc, PDMA_BASE + TX_BASE_PTR(i),
1364155902Sjeff			sc->tx_ring[i].desc_phys_addr);
1365155902Sjeff		RT_WRITE(sc, PDMA_BASE + TX_MAX_CNT(i),
1366131691Salfred			RT_SOFTC_TX_RING_DESC_COUNT);
1367159181Spjd		RT_WRITE(sc, PDMA_BASE + TX_CTX_IDX(i), 0);
136899264Smux	}
136999264Smux
137099264Smux	rt_txrx_enable(sc);
137199264Smux}
1372167551Spjd
1373152176Srodrigc/*
1374152176Srodrigc * rt_rx_coherent_intr
1375152176Srodrigc */
1376152176Srodrigcstatic void
1377152176Srodrigcrt_rx_coherent_intr(struct rt_softc *sc)
1378152176Srodrigc{
1379152176Srodrigc	uint32_t tmp;
1380152176Srodrigc
1381152176Srodrigc	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx coherent interrupt\n");
1382152176Srodrigc
1383152217Srodrigc	sc->rx_coherent_interrupts++;
1384152176Srodrigc
1385152176Srodrigc	/* restart DMA engine */
1386152176Srodrigc	tmp = RT_READ(sc, PDMA_BASE + PDMA_GLO_CFG);
1387152176Srodrigc	tmp &= ~(FE_RX_DMA_EN);
1388152176Srodrigc	RT_WRITE(sc, PDMA_BASE + PDMA_GLO_CFG, tmp);
1389152176Srodrigc
1390152176Srodrigc	/* init Rx ring */
1391189290Sjamie	rt_reset_rx_ring(sc, &sc->rx_ring);
1392189290Sjamie	RT_WRITE(sc, PDMA_BASE + RX_BASE_PTR0,
1393189290Sjamie		sc->rx_ring.desc_phys_addr);
1394189290Sjamie	RT_WRITE(sc, PDMA_BASE + RX_MAX_CNT0,
1395189290Sjamie		RT_SOFTC_RX_RING_DATA_COUNT);
1396189290Sjamie	RT_WRITE(sc, PDMA_BASE + RX_CALC_IDX0,
1397189290Sjamie		RT_SOFTC_RX_RING_DATA_COUNT - 1);
1398189290Sjamie
1399189290Sjamie	rt_txrx_enable(sc);
1400189290Sjamie}
1401189290Sjamie
1402189290Sjamie/*
1403189290Sjamie * rt_rx_intr - a packet received
1404189290Sjamie */
1405189290Sjamiestatic void
1406189290Sjamiert_rx_intr(struct rt_softc *sc)
1407152176Srodrigc{
1408138509Sphk
1409138467Sphk	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx interrupt\n");
1410138467Sphk	sc->rx_interrupts++;
1411138467Sphk	RT_SOFTC_LOCK(sc);
1412138467Sphk
1413138467Sphk	if (!(sc->intr_disable_mask & INT_RX_DONE)) {
1414138467Sphk		rt_intr_disable(sc, INT_RX_DONE);
1415138467Sphk		taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task);
1416138467Sphk	}
1417138467Sphk
1418138467Sphk	sc->intr_pending_mask |= INT_RX_DONE;
1419171852Sjhb	RT_SOFTC_UNLOCK(sc);
1420171852Sjhb}
1421171852Sjhb
1422138467Sphkstatic void
1423138467Sphkrt_rx_delay_intr(struct rt_softc *sc)
1424138467Sphk{
1425171852Sjhb
1426138467Sphk	RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx delay interrupt\n");
1427171852Sjhb	sc->rx_delay_interrupts++;
1428171852Sjhb}
1429171852Sjhb
1430138467Sphkstatic void
1431171852Sjhbrt_tx_delay_intr(struct rt_softc *sc)
1432171852Sjhb{
1433171852Sjhb
1434171852Sjhb	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx delay interrupt\n");
1435171852Sjhb	sc->tx_delay_interrupts++;
1436138467Sphk}
1437138467Sphk
1438171852Sjhb/*
1439171852Sjhb * rt_tx_intr - Transsmition of packet done
1440138467Sphk */
1441171852Sjhbstatic void
1442171852Sjhbrt_tx_intr(struct rt_softc *sc, int qid)
1443171852Sjhb{
1444171852Sjhb
1445171852Sjhb	KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT,
1446138467Sphk		("%s: Tx interrupt: invalid qid=%d\n",
1447138467Sphk		 device_get_nameunit(sc->dev), qid));
1448182740Ssimon
1449182740Ssimon	RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx interrupt: qid=%d\n", qid);
1450171852Sjhb
1451138467Sphk	sc->tx_interrupts[qid]++;
1452171852Sjhb	RT_SOFTC_LOCK(sc);
1453171852Sjhb
1454171852Sjhb	if (!(sc->intr_disable_mask & (INT_TXQ0_DONE << qid))) {
1455171852Sjhb		rt_intr_disable(sc, (INT_TXQ0_DONE << qid));
1456214005Smarcel		taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task);
1457171852Sjhb	}
1458171852Sjhb
1459214005Smarcel	sc->intr_pending_mask |= (INT_TXQ0_DONE << qid);
1460214005Smarcel	RT_SOFTC_UNLOCK(sc);
1461171852Sjhb}
1462171852Sjhb
1463138467Sphk/*
1464138467Sphk * rt_rx_done_task - run RX task
1465138467Sphk */
146699264Smuxstatic void
146799264Smuxrt_rx_done_task(void *context, int pending)
146899264Smux{
146999264Smux	struct rt_softc *sc;
147099264Smux	struct ifnet *ifp;
147199264Smux	int again;
147299264Smux
147399264Smux	sc = context;
147499264Smux	ifp = sc->ifp;
147599264Smux
147699264Smux	RT_DPRINTF(sc, RT_DEBUG_RX, "Rx done task\n");
147799264Smux
147899264Smux	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
147999264Smux		return;
148099264Smux
148199264Smux	sc->intr_pending_mask &= ~INT_RX_DONE;
1482110863Sdes
1483110861Salfred	again = rt_rx_eof(sc, sc->rx_process_limit);
148499264Smux
148599264Smux	RT_SOFTC_LOCK(sc);
1486189290Sjamie
148799264Smux	if ((sc->intr_pending_mask & INT_RX_DONE) || again) {
148899264Smux		RT_DPRINTF(sc, RT_DEBUG_RX,
148999264Smux		    "Rx done task: scheduling again\n");
149099264Smux		taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task);
149199264Smux	} else {
149299264Smux		rt_intr_enable(sc, INT_RX_DONE);
149399264Smux	}
149499264Smux
149599264Smux	RT_SOFTC_UNLOCK(sc);
149699264Smux}
1497189290Sjamie
1498152735Srodrigc/*
1499152735Srodrigc * rt_tx_done_task - check for pending TX task in all queues
1500152735Srodrigc */
1501152735Srodrigcstatic void
1502152735Srodrigcrt_tx_done_task(void *context, int pending)
1503152735Srodrigc{
1504152735Srodrigc	struct rt_softc *sc;
1505152735Srodrigc	struct ifnet *ifp;
1506189290Sjamie	uint32_t intr_mask;
1507189290Sjamie	int i;
1508189290Sjamie
1509189290Sjamie	sc = context;
1510152735Srodrigc	ifp = sc->ifp;
1511152735Srodrigc
1512152735Srodrigc	RT_DPRINTF(sc, RT_DEBUG_TX, "Tx done task\n");
1513152735Srodrigc
1514138467Sphk	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1515138467Sphk		return;
1516138467Sphk
1517138467Sphk	for (i = RT_SOFTC_TX_RING_COUNT - 1; i >= 0; i--) {
1518138467Sphk		if (sc->intr_pending_mask & (INT_TXQ0_DONE << i)) {
1519138467Sphk			sc->intr_pending_mask &= ~(INT_TXQ0_DONE << i);
1520138467Sphk			rt_tx_eof(sc, &sc->tx_ring[i]);
1521138467Sphk		}
1522138467Sphk	}
1523189290Sjamie
1524189290Sjamie	sc->tx_timer = 0;
1525189290Sjamie
1526138467Sphk	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1527138467Sphk
1528138467Sphk	intr_mask = (
1529138467Sphk		INT_TXQ3_DONE |
1530138467Sphk		INT_TXQ2_DONE |
1531166681Scognet		INT_TXQ1_DONE |
1532138467Sphk		INT_TXQ0_DONE);
1533138467Sphk
1534138467Sphk	RT_SOFTC_LOCK(sc);
1535138467Sphk
1536224290Smckusick	rt_intr_enable(sc, ~sc->intr_pending_mask &
1537224290Smckusick	    (sc->intr_disable_mask & intr_mask));
1538138467Sphk
1539138467Sphk	if (sc->intr_pending_mask & intr_mask) {
1540138467Sphk		RT_DPRINTF(sc, RT_DEBUG_TX,
1541138467Sphk		    "Tx done task: scheduling again\n");
1542138467Sphk		taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task);
1543189290Sjamie	}
1544138467Sphk
1545138467Sphk	RT_SOFTC_UNLOCK(sc);
1546138467Sphk
1547138467Sphk	if (!IFQ_IS_EMPTY(&ifp->if_snd))
1548138467Sphk		rt_start(ifp);
1549138467Sphk}
1550138467Sphk
1551138467Sphk/*
1552138467Sphk * rt_periodic_task - run periodic task
1553138467Sphk */
1554138467Sphkstatic void
1555138467Sphkrt_periodic_task(void *context, int pending)
1556138467Sphk{
1557138467Sphk	struct rt_softc *sc;
1558138467Sphk	struct ifnet *ifp;
1559138467Sphk
1560138467Sphk	sc = context;
1561138467Sphk	ifp = sc->ifp;
1562138467Sphk
1563138467Sphk	RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic task: round=%lu\n",
1564138467Sphk	    sc->periodic_round);
1565138467Sphk
1566189290Sjamie	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1567175024Srodrigc		return;
1568175024Srodrigc
1569138467Sphk	RT_SOFTC_LOCK(sc);
1570138467Sphk	sc->periodic_round++;
1571138467Sphk	rt_update_stats(sc);
1572138467Sphk
1573138467Sphk	if ((sc->periodic_round % 10) == 0) {
1574138467Sphk		rt_update_raw_counters(sc);
1575138467Sphk		rt_watchdog(sc);
1576138467Sphk	}
1577138467Sphk
1578138509Sphk	RT_SOFTC_UNLOCK(sc);
1579189290Sjamie	callout_reset(&sc->periodic_ch, hz / 10, rt_periodic, sc);
1580189290Sjamie}
1581189290Sjamie
1582189290Sjamie/*
1583189290Sjamie * rt_rx_eof - check for frames that done by DMA engine and pass it into
1584189290Sjamie * network subsystem.
1585189290Sjamie */
1586189290Sjamiestatic int
1587189290Sjamiert_rx_eof(struct rt_softc *sc, int limit)
1588189290Sjamie{
1589189290Sjamie	struct ifnet *ifp;
1590189290Sjamie	struct rt_softc_rx_ring *ring;
1591189290Sjamie	struct rt_rxdesc *desc;
1592189290Sjamie	struct rt_softc_rx_data *data;
1593189290Sjamie	struct mbuf *m, *mnew;
1594189290Sjamie	bus_dma_segment_t segs[1];
1595189290Sjamie	bus_dmamap_t dma_map;
1596189290Sjamie	uint32_t index, desc_flags;
1597189290Sjamie	int error, nsegs, len, nframes;
1598189290Sjamie
1599189290Sjamie	ifp = sc->ifp;
1600189290Sjamie	ring = &sc->rx_ring;
1601189290Sjamie
1602189290Sjamie	nframes = 0;
1603189290Sjamie
1604189290Sjamie	while (limit != 0) {
1605189290Sjamie		index = RT_READ(sc, PDMA_BASE + RX_DRX_IDX0);
1606189290Sjamie		if (ring->cur == index)
1607189290Sjamie			break;
1608189290Sjamie
1609189290Sjamie		desc = &ring->desc[ring->cur];
1610189290Sjamie		data = &ring->data[ring->cur];
1611189290Sjamie
1612189290Sjamie		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
1613189290Sjamie		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1614189290Sjamie
1615189290Sjamie#ifdef IF_RT_DEBUG
1616189290Sjamie		if ( sc->debug & RT_DEBUG_RX ) {
1617189290Sjamie			printf("\nRX Descriptor[%#08x] dump:\n", (u_int)desc);
1618189290Sjamie		        hexdump(desc, 16, 0, 0);
1619189290Sjamie			printf("-----------------------------------\n");
1620189290Sjamie		}
1621189290Sjamie#endif
1622189290Sjamie
1623189290Sjamie		/* XXX Sometime device don`t set DDONE bit */
1624189290Sjamie#ifdef DDONE_FIXED
1625189290Sjamie		if (!(desc->sdl0 & htole16(RT_RXDESC_SDL0_DDONE))) {
1626189290Sjamie			RT_DPRINTF(sc, RT_DEBUG_RX, "DDONE=0, try next\n");
1627189290Sjamie			break;
1628189290Sjamie		}
1629189290Sjamie#endif
1630189290Sjamie
1631189290Sjamie		len = le16toh(desc->sdl0) & 0x3fff;
1632189290Sjamie		RT_DPRINTF(sc, RT_DEBUG_RX, "new frame len=%d\n", len);
1633189290Sjamie
1634189290Sjamie		nframes++;
1635189290Sjamie
1636189290Sjamie		mnew = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1637189290Sjamie		    MJUMPAGESIZE);
1638189290Sjamie		if (mnew == NULL) {
1639189290Sjamie			sc->rx_mbuf_alloc_errors++;
164099264Smux			ifp->if_ierrors++;
164199264Smux			goto skip;
164299264Smux		}
164399264Smux
164499264Smux		mnew->m_len = mnew->m_pkthdr.len = MJUMPAGESIZE;
164599264Smux
164699264Smux		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
164799264Smux		    ring->spare_dma_map, mnew, segs, &nsegs, BUS_DMA_NOWAIT);
164899264Smux		if (error != 0) {
164999264Smux			RT_DPRINTF(sc, RT_DEBUG_RX,
165099264Smux			    "could not load Rx mbuf DMA map: "
165199264Smux			    "error=%d, nsegs=%d\n",
165299264Smux			    error, nsegs);
165399264Smux
165499264Smux			m_freem(mnew);
165599264Smux
165699264Smux			sc->rx_mbuf_dmamap_errors++;
1657110863Sdes			ifp->if_ierrors++;
1658110861Salfred
165999264Smux			goto skip;
166099264Smux		}
1661189290Sjamie
166299264Smux		KASSERT(nsegs == 1, ("%s: too many DMA segments",
166399264Smux			device_get_nameunit(sc->dev)));
166499264Smux
166599264Smux		bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
166699264Smux			BUS_DMASYNC_POSTREAD);
166799264Smux		bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
166899264Smux
166999264Smux		dma_map = data->dma_map;
1670131551Sphk		data->dma_map = ring->spare_dma_map;
1671131551Sphk		ring->spare_dma_map = dma_map;
1672235626Smckusick
1673235626Smckusick		bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
1674235626Smckusick			BUS_DMASYNC_PREREAD);
1675235626Smckusick
1676131551Sphk		m = data->m;
1677131551Sphk		desc_flags = desc->src;
1678235626Smckusick
1679235626Smckusick		data->m = mnew;
1680131551Sphk		/* Add 2 for proper align of RX IP header */
1681154152Stegge		desc->sdp0 = htole32(segs[0].ds_addr+2);
1682131551Sphk		desc->sdl0 = htole32(segs[0].ds_len-2);
1683131551Sphk		desc->src = 0;
1684131551Sphk		desc->ai = 0;
1685154152Stegge		desc->foe = 0;
1686138087Sphk
1687154152Stegge		RT_DPRINTF(sc, RT_DEBUG_RX,
1688218195Smdf		    "Rx frame: rxdesc flags=0x%08x\n", desc_flags);
1689177528Skib
1690221829Smdf		m->m_pkthdr.rcvif = ifp;
1691177528Skib		/* Add 2 to fix data align, after sdp0 = addr + 2 */
1692177528Skib		m->m_data += 2;
1693154152Stegge		m->m_pkthdr.len = m->m_len = len;
1694167551Spjd
1695154152Stegge		/* check for crc errors */
1696167551Spjd		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1697131551Sphk			/*check for valid checksum*/
1698154152Stegge			if (desc_flags & (RXDSXR_SRC_IP_CSUM_FAIL|
1699154152Stegge			    RXDSXR_SRC_L4_CSUM_FAIL)) {
1700131551Sphk				RT_DPRINTF(sc, RT_DEBUG_RX,
1701154152Stegge				    "rxdesc: crc error\n");
1702154152Stegge
1703154152Stegge				ifp->if_ierrors++;
1704131551Sphk
1705131551Sphk				if (!(ifp->if_flags & IFF_PROMISC)) {
1706138412Sphk				    m_freem(m);
1707154152Stegge				    goto skip;
1708154152Stegge				}
1709154152Stegge			}
1710154152Stegge			if ((desc_flags & RXDSXR_SRC_IP_CSUM_FAIL) != 0) {
1711154152Stegge				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1712154152Stegge				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1713154152Stegge				m->m_pkthdr.csum_data = 0xffff;
1714154152Stegge			}
1715167551Spjd			m->m_flags &= ~M_HASFCS;
1716154152Stegge		}
1717167551Spjd
1718154152Stegge		(*ifp->if_input)(ifp, m);
1719154152Steggeskip:
1720154152Stegge		desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE);
1721154152Stegge
1722154152Stegge		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
1723184599Sattilio			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1724154152Stegge
1725154152Stegge		ring->cur = (ring->cur + 1) % RT_SOFTC_RX_RING_DATA_COUNT;
1726154152Stegge
1727154152Stegge		limit--;
1728154152Stegge	}
1729154152Stegge
1730154152Stegge	if (ring->cur == 0)
1731154152Stegge		RT_WRITE(sc, PDMA_BASE + RX_CALC_IDX0,
1732167551Spjd			RT_SOFTC_RX_RING_DATA_COUNT - 1);
1733154152Stegge	else
1734167551Spjd		RT_WRITE(sc, PDMA_BASE + RX_CALC_IDX0,
1735154152Stegge			ring->cur - 1);
1736154152Stegge
1737154152Stegge	RT_DPRINTF(sc, RT_DEBUG_RX, "Rx eof: nframes=%d\n", nframes);
1738154152Stegge
1739154152Stegge	sc->rx_packets += nframes;
1740154152Stegge
1741184599Sattilio	return (limit == 0);
1742154152Stegge}
1743154152Stegge
1744154152Stegge/*
1745154152Stegge * rt_tx_eof - check for successful transmitted frames and mark their
1746154152Stegge * descriptor as free.
1747154152Stegge */
1748154152Steggestatic void
1749154152Steggert_tx_eof(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
1750154152Stegge{
1751154152Stegge	struct ifnet *ifp;
1752154152Stegge	struct rt_txdesc *desc;
1753154152Stegge	struct rt_softc_tx_data *data;
1754154152Stegge	uint32_t index;
1755154152Stegge	int ndescs, nframes;
1756167551Spjd
1757154152Stegge	ifp = sc->ifp;
1758154152Stegge
1759154152Stegge	ndescs = 0;
1760154152Stegge	nframes = 0;
1761154152Stegge
1762154152Stegge	for (;;) {
1763154152Stegge		index = RT_READ(sc, PDMA_BASE + TX_DTX_IDX(ring->qid));
1764154152Stegge		if (ring->desc_next == index)
1765184599Sattilio			break;
1766154152Stegge
1767154152Stegge		ndescs++;
1768138412Sphk
1769191990Sattilio		desc = &ring->desc[ring->desc_next];
1770138412Sphk
1771138412Sphk		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
1772138412Sphk			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1773191990Sattilio
1774138412Sphk		if (desc->sdl0 & htole16(RT_TXDESC_SDL0_LASTSEG) ||
1775138696Sphk			desc->sdl1 & htole16(RT_TXDESC_SDL1_LASTSEG)) {
1776138412Sphk			nframes++;
1777138412Sphk
1778138448Sphk			data = &ring->data[ring->data_next];
1779138467Sphk
1780138467Sphk			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
1781138467Sphk				BUS_DMASYNC_POSTWRITE);
1782138467Sphk			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
1783138467Sphk
1784138467Sphk			m_freem(data->m);
1785138467Sphk
1786138467Sphk			data->m = NULL;
1787138467Sphk
1788138448Sphk			ifp->if_opackets++;
1789138448Sphk
1790138448Sphk			RT_SOFTC_TX_RING_LOCK(ring);
1791138448Sphk			ring->data_queued--;
1792138448Sphk			ring->data_next = (ring->data_next + 1) %
1793138448Sphk			    RT_SOFTC_TX_RING_DATA_COUNT;
1794138448Sphk			RT_SOFTC_TX_RING_UNLOCK(ring);
1795138448Sphk		}
1796138448Sphk
1797138448Sphk		desc->sdl0 &= ~htole16(RT_TXDESC_SDL0_DDONE);
1798138448Sphk
1799138448Sphk		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
1800138448Sphk			BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1801138448Sphk
1802138448Sphk		RT_SOFTC_TX_RING_LOCK(ring);
1803138448Sphk		ring->desc_queued--;
1804138448Sphk		ring->desc_next = (ring->desc_next + 1) %
1805138448Sphk		    RT_SOFTC_TX_RING_DESC_COUNT;
1806138448Sphk		RT_SOFTC_TX_RING_UNLOCK(ring);
1807138448Sphk	}
1808138448Sphk
1809138448Sphk	RT_DPRINTF(sc, RT_DEBUG_TX,
1810138448Sphk	    "Tx eof: qid=%d, ndescs=%d, nframes=%d\n", ring->qid, ndescs,
1811138448Sphk	    nframes);
1812138448Sphk}
1813138448Sphk
1814138448Sphk/*
1815138448Sphk * rt_update_stats - query statistics counters and update related variables.
1816138448Sphk */
1817138448Sphkstatic void
1818138448Sphkrt_update_stats(struct rt_softc *sc)
1819138448Sphk{
1820138448Sphk	struct ifnet *ifp;
1821138448Sphk
1822138448Sphk	ifp = sc->ifp;
1823138448Sphk	RT_DPRINTF(sc, RT_DEBUG_STATS, "update statistic: \n");
1824138448Sphk	/* XXX do update stats here */
1825138448Sphk}
1826138448Sphk
1827138448Sphk/*
1828138448Sphk * rt_watchdog - reinit device on watchdog event.
1829138448Sphk */
1830138448Sphkstatic void
1831138448Sphkrt_watchdog(struct rt_softc *sc)
1832138448Sphk{
1833138448Sphk	uint32_t tmp;
1834138448Sphk#ifdef notyet
1835138448Sphk	int ntries;
1836138448Sphk#endif
1837138448Sphk
1838138448Sphk	tmp = RT_READ(sc, PSE_BASE + CDMA_OQ_STA);
1839138448Sphk
1840138448Sphk	RT_DPRINTF(sc, RT_DEBUG_WATCHDOG, "watchdog: PSE_IQ_STA=0x%08x\n",
1841138448Sphk	    tmp);
1842138448Sphk
1843138448Sphk	/* XXX: do not reset */
1844138448Sphk#ifdef notyet
1845138448Sphk	if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) != 0) {
1846138448Sphk		sc->tx_queue_not_empty[0]++;
1847138448Sphk
1848138448Sphk		for (ntries = 0; ntries < 10; ntries++) {
1849138448Sphk			tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA);
1850138448Sphk			if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) == 0)
1851138448Sphk				break;
1852181463Sdes
1853138448Sphk			DELAY(1);
1854138448Sphk		}
1855138448Sphk	}
1856138448Sphk
1857138448Sphk	if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) != 0) {
1858138448Sphk		sc->tx_queue_not_empty[1]++;
1859138448Sphk
1860138448Sphk		for (ntries = 0; ntries < 10; ntries++) {
1861138448Sphk			tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA);
1862138448Sphk			if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) == 0)
1863138448Sphk				break;
1864138448Sphk
1865138448Sphk			DELAY(1);
1866138448Sphk		}
1867138448Sphk	}
1868138448Sphk#endif
1869138448Sphk}
1870138448Sphk
1871138448Sphk/*
1872138448Sphk * rt_update_raw_counters - update counters.
1873138448Sphk */
1874138448Sphkstatic void
1875138448Sphkrt_update_raw_counters(struct rt_softc *sc)
1876138448Sphk{
1877138448Sphk
1878138448Sphk	sc->tx_bytes	+= RT_READ(sc, CNTR_BASE + GDMA_TX_GBCNT0);
1879138448Sphk	sc->tx_packets	+= RT_READ(sc, CNTR_BASE + GDMA_TX_GPCNT0);
1880138448Sphk	sc->tx_skip	+= RT_READ(sc, CNTR_BASE + GDMA_TX_SKIPCNT0);
1881138448Sphk	sc->tx_collision+= RT_READ(sc, CNTR_BASE + GDMA_TX_COLCNT0);
1882138448Sphk
1883138448Sphk	sc->rx_bytes	+= RT_READ(sc, CNTR_BASE + GDMA_RX_GBCNT0);
1884138448Sphk	sc->rx_packets	+= RT_READ(sc, CNTR_BASE + GDMA_RX_GPCNT0);
1885138448Sphk	sc->rx_crc_err	+= RT_READ(sc, CNTR_BASE + GDMA_RX_CSUM_ERCNT0);
1886138448Sphk	sc->rx_short_err+= RT_READ(sc, CNTR_BASE + GDMA_RX_SHORT_ERCNT0);
1887138448Sphk	sc->rx_long_err	+= RT_READ(sc, CNTR_BASE + GDMA_RX_LONG_ERCNT0);
1888138448Sphk	sc->rx_phy_err	+= RT_READ(sc, CNTR_BASE + GDMA_RX_FERCNT0);
1889138448Sphk	sc->rx_fifo_overflows+= RT_READ(sc, CNTR_BASE + GDMA_RX_OERCNT0);
1890138448Sphk}
1891138448Sphk
1892138448Sphkstatic void
1893138448Sphkrt_intr_enable(struct rt_softc *sc, uint32_t intr_mask)
1894138448Sphk{
1895138448Sphk	uint32_t tmp;
1896138448Sphk
1897176283Syar	sc->intr_disable_mask &= ~intr_mask;
1898138448Sphk	tmp = sc->intr_enable_mask & ~sc->intr_disable_mask;
1899138448Sphk	RT_WRITE(sc, GE_PORT_BASE + FE_INT_ENABLE, tmp);
1900138448Sphk}
1901138448Sphk
1902138448Sphkstatic void
1903138448Sphkrt_intr_disable(struct rt_softc *sc, uint32_t intr_mask)
1904138448Sphk{
1905138448Sphk	uint32_t tmp;
1906138448Sphk
1907138448Sphk	sc->intr_disable_mask |= intr_mask;
1908138448Sphk	tmp = sc->intr_enable_mask & ~sc->intr_disable_mask;
1909138448Sphk	RT_WRITE(sc, GE_PORT_BASE + FE_INT_ENABLE, tmp);
1910138448Sphk}
1911138448Sphk
1912138448Sphk/*
1913138448Sphk * rt_txrx_enable - enable TX/RX DMA
1914138448Sphk */
1915138448Sphkstatic int
1916138448Sphkrt_txrx_enable(struct rt_softc *sc)
1917138448Sphk{
1918138448Sphk	struct ifnet *ifp;
1919138448Sphk	uint32_t tmp;
1920138448Sphk	int ntries;
1921138448Sphk
1922138448Sphk	ifp = sc->ifp;
1923138448Sphk
1924138448Sphk	/* enable Tx/Rx DMA engine */
1925138448Sphk	for (ntries = 0; ntries < 200; ntries++) {
1926138448Sphk		tmp = RT_READ(sc, PDMA_BASE + PDMA_GLO_CFG);
1927138448Sphk		if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)))
1928141634Sphk			break;
1929138448Sphk
1930138448Sphk		DELAY(1000);
1931138448Sphk	}
1932138448Sphk
1933138448Sphk	if (ntries == 200) {
1934138448Sphk		device_printf(sc->dev, "timeout waiting for DMA engine\n");
1935138448Sphk		return (-1);
1936138448Sphk	}
1937138448Sphk
1938138448Sphk	DELAY(50);
1939138448Sphk
1940138448Sphk	tmp |= FE_TX_WB_DDONE |	FE_RX_DMA_EN | FE_TX_DMA_EN;
1941138448Sphk	RT_WRITE(sc, PDMA_BASE + PDMA_GLO_CFG, tmp);
1942138448Sphk
1943138448Sphk	/* XXX set Rx filter */
1944138448Sphk	return (0);
1945138448Sphk}
1946230725Smckusick
1947138448Sphk/*
1948138448Sphk * rt_alloc_rx_ring - allocate RX DMA ring buffer
1949138448Sphk */
1950138448Sphkstatic int
1951138448Sphkrt_alloc_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
1952138448Sphk{
1953138448Sphk	struct rt_rxdesc *desc;
1954138448Sphk	struct rt_softc_rx_data *data;
1955138448Sphk	bus_dma_segment_t segs[1];
1956138448Sphk	int i, nsegs, error;
1957138448Sphk
1958138448Sphk	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
1959138448Sphk		BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1960138448Sphk		RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc), 1,
1961138448Sphk		RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc),
1962138448Sphk		0, NULL, NULL, &ring->desc_dma_tag);
1963138448Sphk	if (error != 0)	{
1964138448Sphk		device_printf(sc->dev,
1965138448Sphk		    "could not create Rx desc DMA tag\n");
1966138448Sphk		goto fail;
1967138448Sphk	}
1968138448Sphk
1969138448Sphk	error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc,
1970138448Sphk	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map);
1971138448Sphk	if (error != 0) {
1972138448Sphk		device_printf(sc->dev,
1973138448Sphk		    "could not allocate Rx desc DMA memory\n");
1974138448Sphk		goto fail;
1975138448Sphk	}
1976138448Sphk
1977138448Sphk	error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map,
1978138448Sphk		ring->desc,
1979138448Sphk		RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc),
1980138448Sphk		rt_dma_map_addr, &ring->desc_phys_addr, 0);
1981138448Sphk	if (error != 0) {
1982138448Sphk		device_printf(sc->dev, "could not load Rx desc DMA map\n");
1983138448Sphk		goto fail;
1984176283Syar	}
1985138448Sphk
1986138448Sphk	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
1987138448Sphk	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1988138448Sphk		MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, NULL, NULL,
1989138448Sphk		&ring->data_dma_tag);
1990138448Sphk	if (error != 0)	{
1991213664Skib		device_printf(sc->dev,
1992213664Skib		    "could not create Rx data DMA tag\n");
1993213664Skib		goto fail;
1994213664Skib	}
1995213664Skib
1996213664Skib	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
1997213664Skib		desc = &ring->desc[i];
1998213664Skib		data = &ring->data[i];
1999
2000		error = bus_dmamap_create(ring->data_dma_tag, 0,
2001		    &data->dma_map);
2002		if (error != 0)	{
2003			device_printf(sc->dev, "could not create Rx data DMA "
2004			    "map\n");
2005			goto fail;
2006		}
2007
2008		data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
2009		    MJUMPAGESIZE);
2010		if (data->m == NULL) {
2011			device_printf(sc->dev, "could not allocate Rx mbuf\n");
2012			error = ENOMEM;
2013			goto fail;
2014		}
2015
2016		data->m->m_len = data->m->m_pkthdr.len = MJUMPAGESIZE;
2017
2018		error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag,
2019		    data->dma_map, data->m, segs, &nsegs, BUS_DMA_NOWAIT);
2020		if (error != 0)	{
2021			device_printf(sc->dev,
2022			    "could not load Rx mbuf DMA map\n");
2023			goto fail;
2024		}
2025
2026		KASSERT(nsegs == 1, ("%s: too many DMA segments",
2027			device_get_nameunit(sc->dev)));
2028
2029		/* Add 2 for proper align of RX IP header */
2030		desc->sdp0 = htole32(segs[0].ds_addr+2);
2031		desc->sdl0 = htole32(segs[0].ds_len-2);
2032	}
2033
2034	error = bus_dmamap_create(ring->data_dma_tag, 0,
2035	    &ring->spare_dma_map);
2036	if (error != 0) {
2037		device_printf(sc->dev,
2038		    "could not create Rx spare DMA map\n");
2039		goto fail;
2040	}
2041
2042	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2043		BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2044	return (0);
2045
2046fail:
2047	rt_free_rx_ring(sc, ring);
2048	return (error);
2049}
2050
2051/*
2052 * rt_reset_rx_ring - reset RX ring buffer
2053 */
2054static void
2055rt_reset_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
2056{
2057	struct rt_rxdesc *desc;
2058	int i;
2059
2060	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
2061		desc = &ring->desc[i];
2062		desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE);
2063	}
2064
2065	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
2066		BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2067	ring->cur = 0;
2068}
2069
2070/*
2071 * rt_free_rx_ring - free memory used by RX ring buffer
2072 */
static void
rt_free_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
{
	struct rt_softc_rx_data *data;
	int i;

	/* Unload and free the descriptor array, then destroy its tag. */
	if (ring->desc != NULL) {
		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
			BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map);
		bus_dmamem_free(ring->desc_dma_tag, ring->desc,
			ring->desc_dma_map);
	}

	if (ring->desc_dma_tag != NULL)
		bus_dma_tag_destroy(ring->desc_dma_tag);

	/* Release every Rx mbuf and its DMA map. */
	for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
				BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
			m_freem(data->m);
		}

		if (data->dma_map != NULL)
			bus_dmamap_destroy(ring->data_dma_tag, data->dma_map);
	}

	if (ring->spare_dma_map != NULL)
		bus_dmamap_destroy(ring->data_dma_tag, ring->spare_dma_map);

	if (ring->data_dma_tag != NULL)
		bus_dma_tag_destroy(ring->data_dma_tag);
}
2110
2111/*
2112 * rt_alloc_tx_ring - allocate TX ring buffer
2113 */
static int
rt_alloc_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring, int qid)
{
	struct rt_softc_tx_data *data;
	int error, i;

	mtx_init(&ring->lock, device_get_nameunit(sc->dev), NULL, MTX_DEF);

	/* DMA tag, memory and load for the Tx descriptor array. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
		BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
		RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc), 1,
		RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc),
		0, NULL, NULL, &ring->desc_dma_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create Tx desc DMA tag\n");
		goto fail;
	}

	error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map);
	if (error != 0)	{
		device_printf(sc->dev,
		    "could not allocate Tx desc DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map,
	    ring->desc,	(RT_SOFTC_TX_RING_DESC_COUNT *
	    sizeof(struct rt_txdesc)), rt_dma_map_addr,
	    &ring->desc_phys_addr, 0);
	if (error != 0) {
		device_printf(sc->dev, "could not load Tx desc DMA map\n");
		goto fail;
	}

	ring->desc_queued = 0;
	ring->desc_cur = 0;
	ring->desc_next = 0;

	/* Pre-allocated seg0 buffers, one fixed-size slot per data entry. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE, 1,
	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE,
	    0, NULL, NULL, &ring->seg0_dma_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create Tx seg0 DMA tag\n");
		goto fail;
	}

	error = bus_dmamem_alloc(ring->seg0_dma_tag, (void **) &ring->seg0,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->seg0_dma_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate Tx seg0 DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(ring->seg0_dma_tag, ring->seg0_dma_map,
	    ring->seg0,
	    RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE,
	    rt_dma_map_addr, &ring->seg0_phys_addr, 0);
	if (error != 0) {
		device_printf(sc->dev, "could not load Tx seg0 DMA map\n");
		goto fail;
	}

	/* Scatter/gather tag for the mbuf chains queued for transmit. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    MJUMPAGESIZE, RT_SOFTC_MAX_SCATTER, MJUMPAGESIZE, 0, NULL, NULL,
	    &ring->data_dma_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create Tx data DMA tag\n");
		goto fail;
	}

	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
		data = &ring->data[i];

		error = bus_dmamap_create(ring->data_dma_tag, 0,
		    &data->dma_map);
		if (error != 0) {
			device_printf(sc->dev, "could not create Tx data DMA "
			    "map\n");
			goto fail;
		}
	}

	ring->data_queued = 0;
	ring->data_cur = 0;
	ring->data_next = 0;

	ring->qid = qid;
	return (0);

fail:
	/* rt_free_tx_ring() tolerates a partially constructed ring. */
	rt_free_tx_ring(sc, ring);
	return (error);
}
2215
2216/*
2217 * rt_reset_tx_ring - reset TX ring buffer to empty state
2218 */
static void
rt_reset_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
{
	struct rt_softc_tx_data *data;
	struct rt_txdesc *desc;
	int i;

	/* Invalidate every descriptor by zeroing its segment lengths. */
	for (i = 0; i < RT_SOFTC_TX_RING_DESC_COUNT; i++) {
		desc = &ring->desc[i];

		desc->sdl0 = 0;
		desc->sdl1 = 0;
	}

	ring->desc_queued = 0;
	ring->desc_cur = 0;
	ring->desc_next = 0;

	bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
		BUS_DMASYNC_PREWRITE);

	bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
		BUS_DMASYNC_PREWRITE);

	/* Drop any mbufs still queued for transmit. */
	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
				BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	ring->data_queued = 0;
	ring->data_cur = 0;
	ring->data_next = 0;
}
2259
2260/*
 * rt_free_tx_ring - free memory used by TX ring buffer
2262 */
static void
rt_free_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring)
{
	struct rt_softc_tx_data *data;
	int i;

	/* Unload and free the descriptor array, then destroy its tag. */
	if (ring->desc != NULL) {
		bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map,
			BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map);
		bus_dmamem_free(ring->desc_dma_tag, ring->desc,
			ring->desc_dma_map);
	}

	if (ring->desc_dma_tag != NULL)
		bus_dma_tag_destroy(ring->desc_dma_tag);

	/* Same for the seg0 buffer area. */
	if (ring->seg0 != NULL) {
		bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map,
			BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->seg0_dma_tag, ring->seg0_dma_map);
		bus_dmamem_free(ring->seg0_dma_tag, ring->seg0,
			ring->seg0_dma_map);
	}

	if (ring->seg0_dma_tag != NULL)
		bus_dma_tag_destroy(ring->seg0_dma_tag);

	/* Release any queued mbufs and the per-slot DMA maps. */
	for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_dma_tag, data->dma_map,
				BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_dma_tag, data->dma_map);
			m_freem(data->m);
		}

		if (data->dma_map != NULL)
			bus_dmamap_destroy(ring->data_dma_tag, data->dma_map);
	}

	if (ring->data_dma_tag != NULL)
		bus_dma_tag_destroy(ring->data_dma_tag);

	mtx_destroy(&ring->lock);
}
2310
2311/*
2312 * rt_dma_map_addr - get address of busdma segment
2313 */
2314static void
2315rt_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2316{
2317	if (error != 0)
2318		return;
2319
2320	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
2321
2322	*(bus_addr_t *) arg = segs[0].ds_addr;
2323}
2324
2325/*
2326 * rt_sysctl_attach - attach sysctl nodes for NIC counters.
2327 */
static void
rt_sysctl_attach(struct rt_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid *stats;

	ctx = device_get_sysctl_ctx(sc->dev);
	tree = device_get_sysctl_tree(sc->dev);

	/* statistic counters */
	stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "stats", CTLFLAG_RD, 0, "statistic");

	/* Interrupt counters. */
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "interrupts", CTLFLAG_RD, &sc->interrupts, 0,
	    "all interrupts");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "tx_coherent_interrupts", CTLFLAG_RD, &sc->tx_coherent_interrupts,
	    0, "Tx coherent interrupts");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_coherent_interrupts", CTLFLAG_RD, &sc->rx_coherent_interrupts,
	    0, "Rx coherent interrupts");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_interrupts", CTLFLAG_RD, &sc->rx_interrupts, 0,
	    "Rx interrupts");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_delay_interrupts", CTLFLAG_RD, &sc->rx_delay_interrupts, 0,
	    "Rx delay interrupts");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ3_interrupts", CTLFLAG_RD, &sc->tx_interrupts[3], 0,
	    "Tx AC3 interrupts");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ2_interrupts", CTLFLAG_RD, &sc->tx_interrupts[2], 0,
	    "Tx AC2 interrupts");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ1_interrupts", CTLFLAG_RD, &sc->tx_interrupts[1], 0,
	    "Tx AC1 interrupts");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ0_interrupts", CTLFLAG_RD, &sc->tx_interrupts[0], 0,
	    "Tx AC0 interrupts");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "tx_delay_interrupts", CTLFLAG_RD, &sc->tx_delay_interrupts,
	    0, "Tx delay interrupts");

	/* Per-queue ring occupancy. */
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ3_desc_queued", CTLFLAG_RD, &sc->tx_ring[3].desc_queued,
	    0, "Tx AC3 descriptors queued");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ3_data_queued", CTLFLAG_RD, &sc->tx_ring[3].data_queued,
	    0, "Tx AC3 data queued");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ2_desc_queued", CTLFLAG_RD, &sc->tx_ring[2].desc_queued,
	    0, "Tx AC2 descriptors queued");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ2_data_queued", CTLFLAG_RD, &sc->tx_ring[2].data_queued,
	    0, "Tx AC2 data queued");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ1_desc_queued", CTLFLAG_RD, &sc->tx_ring[1].desc_queued,
	    0, "Tx AC1 descriptors queued");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ1_data_queued", CTLFLAG_RD, &sc->tx_ring[1].data_queued,
	    0, "Tx AC1 data queued");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ0_desc_queued", CTLFLAG_RD, &sc->tx_ring[0].desc_queued,
	    0, "Tx AC0 descriptors queued");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ0_data_queued", CTLFLAG_RD, &sc->tx_ring[0].data_queued,
	    0, "Tx AC0 data queued");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ3_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[3],
	    0, "Tx AC3 data queue full");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ2_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[2],
	    0, "Tx AC2 data queue full");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ1_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[1],
	    0, "Tx AC1 data queue full");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "TXQ0_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[0],
	    0, "Tx AC0 data queue full");

	/* Error and event counters. */
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "tx_watchdog_timeouts", CTLFLAG_RD, &sc->tx_watchdog_timeouts,
	    0, "Tx watchdog timeouts");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "tx_defrag_packets", CTLFLAG_RD, &sc->tx_defrag_packets, 0,
	    "Tx defragmented packets");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "no_tx_desc_avail", CTLFLAG_RD, &sc->no_tx_desc_avail, 0,
	    "no Tx descriptors available");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_mbuf_alloc_errors", CTLFLAG_RD, &sc->rx_mbuf_alloc_errors,
	    0, "Rx mbuf allocation errors");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_mbuf_dmamap_errors", CTLFLAG_RD, &sc->rx_mbuf_dmamap_errors,
	    0, "Rx mbuf DMA mapping errors");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "tx_queue_0_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[0],
	    0, "Tx queue 0 not empty");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "tx_queue_1_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[1],
	    0, "Tx queue 1 not empty");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_packets", CTLFLAG_RD, &sc->rx_packets, 0,
	    "Rx packets");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_crc_errors", CTLFLAG_RD, &sc->rx_crc_err, 0,
	    "Rx CRC errors");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_phy_errors", CTLFLAG_RD, &sc->rx_phy_err, 0,
	    "Rx PHY errors");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_dup_packets", CTLFLAG_RD, &sc->rx_dup_packets, 0,
	    "Rx duplicate packets");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_fifo_overflows", CTLFLAG_RD, &sc->rx_fifo_overflows, 0,
	    "Rx FIFO overflows");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_bytes", CTLFLAG_RD, &sc->rx_bytes, 0,
	    "Rx bytes");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_long_err", CTLFLAG_RD, &sc->rx_long_err, 0,
	    "Rx too long frame errors");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "rx_short_err", CTLFLAG_RD, &sc->rx_short_err, 0,
	    "Rx too short frame errors");

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "tx_bytes", CTLFLAG_RD, &sc->tx_bytes, 0,
	    "Tx bytes");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "tx_packets", CTLFLAG_RD, &sc->tx_packets, 0,
	    "Tx packets");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "tx_skip", CTLFLAG_RD, &sc->tx_skip, 0,
	    "Tx skip count for GDMA ports");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO,
	    "tx_collision", CTLFLAG_RD, &sc->tx_collision, 0,
	    "Tx collision count for GDMA ports");
}
2503
2504#ifdef IF_RT_PHY_SUPPORT
2505static int
2506rt_miibus_readreg(device_t dev, int phy, int reg)
2507{
2508	struct rt_softc *sc = device_get_softc(dev);
2509
2510	/*
2511	 * PSEUDO_PHYAD is a special value for indicate switch attached.
2512	 * No one PHY use PSEUDO_PHYAD (0x1e) address.
2513	 */
2514	if (phy == 31) {
2515		/* Fake PHY ID for bfeswitch attach */
2516		switch (reg) {
2517		case MII_BMSR:
2518			return (BMSR_EXTSTAT|BMSR_MEDIAMASK);
2519		case MII_PHYIDR1:
2520			return (0x40);		/* As result of faking */
2521		case MII_PHYIDR2:		/* PHY will detect as */
2522			return (0x6250);		/* bfeswitch */
2523		}
2524	}
2525
2526	/* Wait prev command done if any */
2527	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2528	RT_WRITE(sc, MDIO_ACCESS,
2529	    MDIO_CMD_ONGO ||
2530	    ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) ||
2531	    ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK));
2532	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2533
2534	return (RT_READ(sc, MDIO_ACCESS) & MDIO_PHY_DATA_MASK);
2535}
2536
2537static int
2538rt_miibus_writereg(device_t dev, int phy, int reg, int val)
2539{
2540	struct rt_softc *sc = device_get_softc(dev);
2541
2542	/* Wait prev command done if any */
2543	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2544	RT_WRITE(sc, MDIO_ACCESS,
2545	    MDIO_CMD_ONGO || MDIO_CMD_WR ||
2546	    ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) ||
2547	    ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK) ||
2548	    (val & MDIO_PHY_DATA_MASK));
2549	while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO);
2550
2551	return (0);
2552}
2553
/* Media status change callback from miibus. */
void
rt_miibus_statchg(device_t dev)
{
	struct rt_softc *sc = device_get_softc(dev);
	struct mii_data *mii;

	mii = device_get_softc(sc->rt_miibus);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			/* XXX check link here */
			/*
			 * NOTE(review): magic constant - presumably a
			 * "link up" flag bit; confirm against the flags
			 * definitions in the softc header.
			 */
			sc->flags |= 1;
			break;
		default:
			break;
		}
	}
}
2575#endif /* IF_RT_PHY_SUPPORT */
2576
/* newbus method dispatch table for the rt driver. */
static device_method_t rt_dev_methods[] =
{
	DEVMETHOD(device_probe, rt_probe),
	DEVMETHOD(device_attach, rt_attach),
	DEVMETHOD(device_detach, rt_detach),
	DEVMETHOD(device_shutdown, rt_shutdown),
	DEVMETHOD(device_suspend, rt_suspend),
	DEVMETHOD(device_resume, rt_resume),

#ifdef IF_RT_PHY_SUPPORT
	/* MII interface */
	DEVMETHOD(miibus_readreg,	rt_miibus_readreg),
	DEVMETHOD(miibus_writereg,	rt_miibus_writereg),
	DEVMETHOD(miibus_statchg,	rt_miibus_statchg),
#endif

	DEVMETHOD_END
};
2595
/* Driver declaration: name, method table, softc size. */
static driver_t rt_driver =
{
	"rt",
	rt_dev_methods,
	sizeof(struct rt_softc)
};
2602
static devclass_t rt_dev_class;

/* Attach the rt driver to the nexus bus; requires ether and miibus. */
DRIVER_MODULE(rt, nexus, rt_driver, rt_dev_class, 0, 0);
MODULE_DEPEND(rt, ether, 1, 1, 1);
MODULE_DEPEND(rt, miibus, 1, 1, 1);
2608
2609