/*	$NetBSD: ralink_eth.c,v 1.26 2022/09/29 07:00:47 skrll Exp $	*/
/*-
 * Copyright (c) 2011 CradlePoint Technology, Inc.
 * All rights reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY CRADLEPOINT TECHNOLOGY, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/* ralink_eth.c -- Ralink Ethernet Driver */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ralink_eth.c,v 1.26 2022/09/29 07:00:47 skrll Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <net/if_vlanvar.h>

#include <net/bpf.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <mips/ralink/ralink_var.h>
#include <mips/ralink/ralink_reg.h>
#if 0
#define CPDEBUG				/* XXX TMP DEBUG FIXME */
#define RALINK_ETH_DEBUG		/* XXX TMP DEBUG FIXME */
#define ENABLE_RALINK_DEBUG_ERROR 1
#define ENABLE_RALINK_DEBUG_MISC  1
#define ENABLE_RALINK_DEBUG_INFO  1
#define ENABLE_RALINK_DEBUG_FORCE 1
#define ENABLE_RALINK_DEBUG_REG   1
#endif
#include <mips/ralink/ralink_debug.h>


/* PDMA RX Descriptor Format */
struct ralink_rx_desc {
	uint32_t data_ptr;
	uint32_t rxd_info1;
#define RXD_LEN1(x)	(((x) >> 0) & 0x3fff)
#define RXD_LAST1	(1 << 14)
#define RXD_LEN0(x)	(((x) >> 16) & 0x3fff)
#define RXD_LAST0	(1 << 30)
#define RXD_DDONE	(1 << 31)
	uint32_t unused;
	uint32_t rxd_info2;
#define RXD_FOE(x)	(((x) >> 0) & 0x3fff)
#define RXD_FVLD	(1 << 14)
#define RXD_INFO(x)	(((x) >> 16) & 0xff)
#define RXD_PORT(x)	(((x) >> 24) & 0x7)
#define RXD_INFO_CPU	(1 << 27)
#define RXD_L4_FAIL	(1 << 28)
#define RXD_IP_FAIL	(1 << 29)
#define RXD_L4_VLD	(1 << 30)
#define RXD_IP_VLD	(1 << 31)
};
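
/*
 * Descriptor ownership follows the DDONE bit: software hands a
 * descriptor to the PDMA engine with DDONE clear, and the engine
 * sets DDONE once it has filled (RX) or drained (TX) the buffer.
 */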

/* PDMA TX Descriptor Format */
struct ralink_tx_desc {
	uint32_t data_ptr0;
	uint32_t txd_info1;
#define TXD_LEN1(x)	(((x) & 0x3fff) << 0)
#define TXD_LAST1	(1 << 14)
#define TXD_BURST	(1 << 15)
#define TXD_LEN0(x)	(((x) & 0x3fff) << 16)
#define TXD_LAST0	(1 << 30)
#define TXD_DDONE	(1 << 31)
	uint32_t data_ptr1;
	uint32_t txd_info2;
#define TXD_VIDX(x)	(((x) & 0xf) << 0)
#define TXD_VPRI(x)	(((x) & 0x7) << 4)
#define TXD_VEN		(1 << 7)
#define TXD_SIDX(x)	(((x) & 0xf) << 8)
#define TXD_SEN(x)	(1 << 13)
#define TXD_QN(x)	(((x) & 0x7) << 16)
#define TXD_PN(x)	(((x) & 0x7) << 24)
#define  TXD_PN_CPU	0
#define  TXD_PN_GDMA1	1
#define  TXD_PN_GDMA2	2
#define TXD_TCP_EN	(1 << 29)
#define TXD_UDP_EN	(1 << 30)
#define TXD_IP_EN	(1 << 31)
};

/* TODO:
 * try to scale the number of descriptors with the size of memory;
 * these numbers may have a significant impact on performance, memory
 * and mbuf usage.
 */
#if RTMEMSIZE >= 64
#define RALINK_ETH_NUM_RX_DESC 256
#define RALINK_ETH_NUM_TX_DESC 256
#else
#define RALINK_ETH_NUM_RX_DESC 64
#define RALINK_ETH_NUM_TX_DESC 64
#endif
/* maximum segments per packet */
#define RALINK_ETH_MAX_TX_SEGS 1

/* define a struct for ease of dma memory allocation */
struct ralink_descs {
	struct ralink_rx_desc rxdesc[RALINK_ETH_NUM_RX_DESC];
	struct ralink_tx_desc txdesc[RALINK_ETH_NUM_TX_DESC];
};
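
/*
 * Both rings live in this one structure so a single bus_dmamem
 * allocation and one DMA map (sc_pdmamap) cover them; individual
 * descriptors are synced by their byte offset into that map.
 */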

/* Software state for transmit jobs. */
struct ralink_eth_txstate {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_idx;			/* the index in txdesc ring that */
					/*  this state is tracking */
	SIMPLEQ_ENTRY(ralink_eth_txstate) txs_q;
};

SIMPLEQ_HEAD(ralink_eth_txsq, ralink_eth_txstate);

/*
 * Software state for receive jobs.
 */
struct ralink_eth_rxstate {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

typedef struct ralink_eth_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_memt;	/* bus space tag */
	bus_space_handle_t sc_sy_memh;	/* handle at SYSCTL_BASE */
	bus_space_handle_t sc_fe_memh;	/* handle at FRAME_ENGINE_BASE */
	bus_space_handle_t sc_sw_memh;	/* handle at ETH_SW_BASE */
	int sc_sy_size;			/* size of Sysctl regs space */
	int sc_fe_size;			/* size of Frame Engine regs space */
	int sc_sw_size;			/* size of Ether Switch regs space */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	void *sc_ih;			/* interrupt handle */

	/* tx/rx dma mapping */
	bus_dma_segment_t sc_dseg;
	int sc_ndseg;
	bus_dmamap_t sc_pdmamap;	/* PDMA DMA map */
#define sc_pdma sc_pdmamap->dm_segs[0].ds_addr

	struct ralink_descs *sc_descs;
#define sc_rxdesc sc_descs->rxdesc
#define sc_txdesc sc_descs->txdesc

#define RALINK_MIN_BUF 64
	char ralink_zero_buf[RALINK_MIN_BUF];

	struct ralink_eth_txstate sc_txstate[RALINK_ETH_NUM_TX_DESC];
	struct ralink_eth_rxstate sc_rxstate[RALINK_ETH_NUM_RX_DESC];

	struct ralink_eth_txsq sc_txfreeq;	/* free Tx descsofts */
	struct ralink_eth_txsq sc_txdirtyq;	/* dirty Tx descsofts */

	struct ethercom sc_ethercom;		/* ethernet common data */
	u_int sc_pending_tx;

	/* mii */
	struct mii_data sc_mii;
	struct callout sc_tick_callout;

	struct evcnt sc_evcnt_spurious_intr;
	struct evcnt sc_evcnt_rxintr;
	struct evcnt sc_evcnt_rxintr_skip_len;
	struct evcnt sc_evcnt_rxintr_skip_tag_none;
	struct evcnt sc_evcnt_rxintr_skip_tag_inval;
	struct evcnt sc_evcnt_rxintr_skip_inact;
	struct evcnt sc_evcnt_txintr;
	struct evcnt sc_evcnt_input;
	struct evcnt sc_evcnt_output;
	struct evcnt sc_evcnt_watchdog;
	struct evcnt sc_evcnt_wd_reactivate;
	struct evcnt sc_evcnt_wd_tx;
	struct evcnt sc_evcnt_wd_spurious;
	struct evcnt sc_evcnt_add_rxbuf_hdr_fail;
	struct evcnt sc_evcnt_add_rxbuf_mcl_fail;
} ralink_eth_softc_t;

/* offset RX buffers so the IP header lands on a 4-byte boundary */
#define RALINK_ETHER_ALIGN 2

/* device functions */
static int  ralink_eth_match(device_t, cfdata_t, void *);
static void ralink_eth_attach(device_t, device_t, void *);
static int  ralink_eth_detach(device_t, int);
static int  ralink_eth_activate(device_t, enum devact);

/* local driver functions */
static void ralink_eth_hw_init(ralink_eth_softc_t *);
static int  ralink_eth_intr(void *);
static void ralink_eth_reset(ralink_eth_softc_t *);
static void ralink_eth_rxintr(ralink_eth_softc_t *);
static void ralink_eth_txintr(ralink_eth_softc_t *);

/* partition functions */
static int  ralink_eth_enable(ralink_eth_softc_t *);
static void ralink_eth_disable(ralink_eth_softc_t *);

/* ifnet functions */
static int  ralink_eth_init(struct ifnet *);
static void ralink_eth_rxdrain(ralink_eth_softc_t *);
static void ralink_eth_stop(struct ifnet *, int);
static int  ralink_eth_add_rxbuf(ralink_eth_softc_t *, int);
static void ralink_eth_start(struct ifnet *);
static void ralink_eth_watchdog(struct ifnet *);
static int  ralink_eth_ioctl(struct ifnet *, u_long, void *);

/* mii functions */
#if defined(RT3050) || defined(RT3052)
static void ralink_eth_mdio_enable(ralink_eth_softc_t *, bool);
#endif
static void ralink_eth_mii_statchg(struct ifnet *);
static void ralink_eth_mii_tick(void *);
static int  ralink_eth_mii_read(device_t, int, int, uint16_t *);
static int  ralink_eth_mii_write(device_t, int, int, uint16_t);

CFATTACH_DECL_NEW(reth, sizeof(struct ralink_eth_softc),
    ralink_eth_match, ralink_eth_attach, ralink_eth_detach,
    ralink_eth_activate);

static inline uint32_t
sy_read(const ralink_eth_softc_t *sc, const bus_size_t off)
{
	return bus_space_read_4(sc->sc_memt, sc->sc_sy_memh, off);
}

static inline void
sy_write(const ralink_eth_softc_t *sc, const bus_size_t off, const uint32_t val)
{
	bus_space_write_4(sc->sc_memt, sc->sc_sy_memh, off, val);
}

static inline uint32_t
fe_read(const ralink_eth_softc_t *sc, const bus_size_t off)
{
	return bus_space_read_4(sc->sc_memt, sc->sc_fe_memh, off);
}

static inline void
fe_write(const ralink_eth_softc_t *sc, const bus_size_t off, const uint32_t val)
{
	bus_space_write_4(sc->sc_memt, sc->sc_fe_memh, off, val);
}

static inline uint32_t
sw_read(const ralink_eth_softc_t *sc, const bus_size_t off)
{
	return bus_space_read_4(sc->sc_memt, sc->sc_sw_memh, off);
}

static inline void
sw_write(const ralink_eth_softc_t *sc, const bus_size_t off, const uint32_t val)
{
	bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, off, val);
}

/*
 * ralink_eth_match
 */
int
ralink_eth_match(device_t parent, cfdata_t cf, void *aux)
{
	return 1;
}

/*
 * ralink_eth_attach
 */
void
ralink_eth_attach(device_t parent, device_t self, void *aux)
{
	ralink_eth_softc_t * const sc = device_private(self);
	const struct mainbus_attach_args *ma = aux;
	struct mii_data *mii = &sc->sc_mii;
	int error;
	int i;

	aprint_naive(": Ralink Ethernet\n");
	aprint_normal(": Ralink Ethernet\n");

	evcnt_attach_dynamic(&sc->sc_evcnt_spurious_intr, EVCNT_TYPE_INTR, NULL,
	    device_xname(self), "spurious intr");
	evcnt_attach_dynamic(&sc->sc_evcnt_rxintr, EVCNT_TYPE_INTR, NULL,
	    device_xname(self), "rxintr");
	evcnt_attach_dynamic(&sc->sc_evcnt_rxintr_skip_len,
	    EVCNT_TYPE_INTR, &sc->sc_evcnt_rxintr,
	    device_xname(self), "rxintr skip: no room for VLAN header");
	evcnt_attach_dynamic(&sc->sc_evcnt_rxintr_skip_tag_none,
	    EVCNT_TYPE_INTR, &sc->sc_evcnt_rxintr,
	    device_xname(self), "rxintr skip: no VLAN tag");
	evcnt_attach_dynamic(&sc->sc_evcnt_rxintr_skip_tag_inval,
	    EVCNT_TYPE_INTR, &sc->sc_evcnt_rxintr,
	    device_xname(self), "rxintr skip: invalid VLAN tag");
	evcnt_attach_dynamic(&sc->sc_evcnt_rxintr_skip_inact,
	    EVCNT_TYPE_INTR, &sc->sc_evcnt_rxintr,
	    device_xname(self), "rxintr skip: partition inactive");
	evcnt_attach_dynamic(&sc->sc_evcnt_txintr, EVCNT_TYPE_INTR, NULL,
	    device_xname(self), "txintr");
	evcnt_attach_dynamic(&sc->sc_evcnt_input, EVCNT_TYPE_INTR, NULL,
	    device_xname(self), "input");
	evcnt_attach_dynamic(&sc->sc_evcnt_output, EVCNT_TYPE_INTR, NULL,
	    device_xname(self), "output");
	evcnt_attach_dynamic(&sc->sc_evcnt_watchdog, EVCNT_TYPE_INTR, NULL,
	    device_xname(self), "watchdog");
	evcnt_attach_dynamic(&sc->sc_evcnt_wd_tx,
	    EVCNT_TYPE_INTR, &sc->sc_evcnt_watchdog,
	    device_xname(self), "watchdog TX timeout");
	evcnt_attach_dynamic(&sc->sc_evcnt_wd_spurious,
	    EVCNT_TYPE_INTR, &sc->sc_evcnt_watchdog,
	    device_xname(self), "watchdog spurious");
	evcnt_attach_dynamic(&sc->sc_evcnt_wd_reactivate,
	    EVCNT_TYPE_INTR, &sc->sc_evcnt_watchdog,
	    device_xname(self), "watchdog reactivate");
	evcnt_attach_dynamic(&sc->sc_evcnt_add_rxbuf_hdr_fail,
	    EVCNT_TYPE_INTR, NULL,
	    device_xname(self), "add rxbuf hdr fail");
	evcnt_attach_dynamic(&sc->sc_evcnt_add_rxbuf_mcl_fail,
	    EVCNT_TYPE_INTR, NULL,
	    device_xname(self), "add rxbuf mcl fail");

	/*
	 * Establish the initial Ethernet address: the address supplied
	 * at build time via RALINK_ETH_MACADDR if defined, otherwise a
	 * hard-coded default.
	 */
#ifdef RALINK_ETH_MACADDR
	uint8_t enaddr[ETHER_ADDR_LEN];
	ether_aton_r(enaddr, sizeof(enaddr), ___STRING(RALINK_ETH_MACADDR));
#else
	uint8_t enaddr[ETHER_ADDR_LEN] = { 0x00, 0x30, 0x44, 0x00, 0x00, 0x00 };
#endif

	sc->sc_dev = self;
	sc->sc_dmat = ma->ma_dmat;
	sc->sc_memt = ma->ma_memt;
	sc->sc_sy_size = 0x10000;
	sc->sc_fe_size = 0x10000;
	sc->sc_sw_size = 0x08000;

	/*
	 * map the registers
	 *
	 * we map the Sysctl, Frame Engine and Ether Switch registers
	 * separately so we can use the defined register offsets sanely
	 */
	if ((error = bus_space_map(sc->sc_memt, RA_SYSCTL_BASE,
	    sc->sc_sy_size, 0, &sc->sc_sy_memh)) != 0) {
		aprint_error_dev(self, "unable to map Sysctl registers, "
		    "error=%d\n", error);
		goto fail_0a;
	}
	if ((error = bus_space_map(sc->sc_memt, RA_FRAME_ENGINE_BASE,
	    sc->sc_fe_size, 0, &sc->sc_fe_memh)) != 0) {
		aprint_error_dev(self, "unable to map Frame Engine registers, "
		    "error=%d\n", error);
		goto fail_0b;
	}
	if ((error = bus_space_map(sc->sc_memt, RA_ETH_SW_BASE,
	    sc->sc_sw_size, 0, &sc->sc_sw_memh)) != 0) {
		aprint_error_dev(self, "unable to map Ether Switch registers, "
		    "error=%d\n", error);
		goto fail_0c;
	}

	/* Allocate desc structures, and create & load the DMA map for them */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct ralink_descs),
	    PAGE_SIZE, 0, &sc->sc_dseg, 1, &sc->sc_ndseg, 0)) != 0) {
		aprint_error_dev(self, "unable to allocate transmit descs, "
		    "error=%d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_ndseg,
	    sizeof(struct ralink_descs), (void **)&sc->sc_descs,
	    BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(self, "unable to map control data, "
		    "error=%d\n", error);
		goto fail_2;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, sizeof(struct ralink_descs),
	    1, sizeof(struct ralink_descs), 0, 0, &sc->sc_pdmamap)) != 0) {
		aprint_error_dev(self, "unable to create control data DMA map, "
		    "error=%d\n", error);
		goto fail_3;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_pdmamap, sc->sc_descs,
	    sizeof(struct ralink_descs), NULL, 0)) != 0) {
		aprint_error_dev(self, "unable to load control data DMA map, "
		    "error=%d\n", error);
		goto fail_4;
	}

	/* Create the transmit buffer DMA maps.  */
	for (i = 0; i < RALINK_ETH_NUM_TX_DESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    RALINK_ETH_MAX_TX_SEGS, MCLBYTES, 0, 0,
		    &sc->sc_txstate[i].txs_dmamap)) != 0) {
			aprint_error_dev(self,
			    "unable to create tx DMA map %d, error=%d\n",
			    i, error);
			goto fail_5;
		}
	}

	/* Create the receive buffer DMA maps.  */
	for (i = 0; i < RALINK_ETH_NUM_RX_DESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxstate[i].rxs_dmamap)) != 0) {
			aprint_error_dev(self,
			    "unable to create rx DMA map %d, error=%d\n",
			    i, error);
			goto fail_6;
		}
		sc->sc_rxstate[i].rxs_mbuf = NULL;
	}

	/* zero buffer used to pad short packets */
	memset(sc->ralink_zero_buf, 0, RALINK_MIN_BUF);

	/* program the station address into the hardware */
	fe_write(sc, RA_FE_GDMA1_MAC_LSB,
	    (enaddr[5] | (enaddr[4] << 8) |
	    (enaddr[3] << 16) | (enaddr[2] << 24)));
	fe_write(sc, RA_FE_GDMA1_MAC_MSB,
	    (enaddr[1] | (enaddr[0] << 8)));

	/*
	 * iterate through ports
	 *  slickrock must use specific non-linear sequence
	 *  others are linear
	 */
	struct ifnet * const ifp = &sc->sc_ethercom.ec_if;

	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);

	/*
	 * Initialize our media structures.
	 * This may probe the PHY, if present.
	 */
	mii->mii_ifp = ifp;
	mii->mii_readreg = ralink_eth_mii_read;
	mii->mii_writereg = ralink_eth_mii_write;
	mii->mii_statchg = ralink_eth_mii_statchg;
	sc->sc_ethercom.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii_attach(sc->sc_dev, mii, ~0, MII_PHY_ANY, MII_OFFSET_ANY,
	    MIIF_FORCEANEG | MIIF_DOPAUSE | MIIF_NOISOLATE);

	if (LIST_EMPTY(&mii->mii_phys)) {
#if 1
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_1000_T |
		    IFM_FDX | IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_1000_T |
		    IFM_FDX | IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
#else
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
#endif
	} else {
		/* Ensure the mask is right for the switch's multiple PHYs */
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
	}

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = ralink_eth_init;
	ifp->if_start = ralink_eth_start;
	ifp->if_ioctl = ralink_eth_ioctl;
	ifp->if_stop = ralink_eth_stop;
	ifp->if_watchdog = ralink_eth_watchdog;
	IFQ_SET_READY(&ifp->if_snd);

	/* We can support 802.1Q VLAN-sized frames. */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/* We support IPv4/TCP/UDP checksum offload */
	ifp->if_capabilities |=
	    (IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx);

	/* Attach the interface. */
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);

	/* init our mii ticker */
	callout_init(&sc->sc_tick_callout, 0);
	callout_reset(&sc->sc_tick_callout, hz, ralink_eth_mii_tick, sc);

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < RALINK_ETH_NUM_RX_DESC; i++) {
		if (sc->sc_rxstate[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxstate[i].rxs_dmamap);
	}
 fail_5:
	for (i = 0; i < RALINK_ETH_NUM_TX_DESC; i++) {
		if (sc->sc_txstate[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txstate[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_pdmamap);
 fail_4:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_pdmamap);
 fail_3:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_descs,
	    sizeof(struct ralink_descs));
 fail_2:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_ndseg);
 fail_1:
	bus_space_unmap(sc->sc_memt, sc->sc_sw_memh, sc->sc_sw_size);
 fail_0c:
	bus_space_unmap(sc->sc_memt, sc->sc_fe_memh, sc->sc_fe_size);
 fail_0b:
	bus_space_unmap(sc->sc_memt, sc->sc_sy_memh, sc->sc_sy_size);
 fail_0a:
	return;
}

/*
 * ralink_eth_activate:
 *
 *	Handle device activation/deactivation requests.
 */
int
ralink_eth_activate(device_t self, enum devact act)
{
	ralink_eth_softc_t * const sc = device_private(self);
	int error = 0;
	int s;

	s = splnet();
	switch (act) {
	case DVACT_DEACTIVATE:
		if_deactivate(&sc->sc_ethercom.ec_if);
		break;
	}
	splx(s);

	return error;
}

/*
 * ralink_eth_partition_enable
 */
static int
ralink_eth_enable(ralink_eth_softc_t *sc)
{
	RALINK_DEBUG_FUNC_ENTRY();

	if (sc->sc_ih != NULL) {
		RALINK_DEBUG(RALINK_DEBUG_MISC, "%s() already active",
			__func__);
		return EALREADY;
	}

	sc->sc_pending_tx = 0;

	int s = splnet();
	ralink_eth_hw_init(sc);
	sc->sc_ih = ra_intr_establish(RA_IRQ_FENGINE,
	    ralink_eth_intr, sc, 1);
	splx(s);
	if (sc->sc_ih == NULL) {
		RALINK_DEBUG(RALINK_DEBUG_ERROR,
		    "%s: unable to establish interrupt\n",
		    device_xname(sc->sc_dev));
		return EIO;
	}

	return 0;
}

/*
 * ralink_eth_partition_disable
 */
static void
ralink_eth_disable(ralink_eth_softc_t *sc)
{
	RALINK_DEBUG_FUNC_ENTRY();

	int s = splnet();
	ralink_eth_rxdrain(sc);
	ra_intr_disestablish(sc->sc_ih);
	sc->sc_ih = NULL;

	/* stop the mii ticker */
	callout_stop(&sc->sc_tick_callout);

	/* quiesce the block */
	ralink_eth_reset(sc);
	splx(s);
}

/*
 * ralink_eth_detach
 */
static int
ralink_eth_detach(device_t self, int flags)
{
	RALINK_DEBUG_FUNC_ENTRY();
	ralink_eth_softc_t * const sc = device_private(self);
	struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
	struct ralink_eth_rxstate *rxs;
	struct ralink_eth_txstate *txs;
	int i;

	ralink_eth_disable(sc);
	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
	ether_ifdetach(ifp);
	if_detach(ifp);
	ifmedia_fini(&sc->sc_mii.mii_media);

	for (i = 0; i < RALINK_ETH_NUM_RX_DESC; i++) {
		rxs = &sc->sc_rxstate[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, rxs->rxs_dmamap);
	}

	for (i = 0; i < RALINK_ETH_NUM_TX_DESC; i++) {
		txs = &sc->sc_txstate[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, txs->txs_dmamap);
	}

	bus_dmamap_unload(sc->sc_dmat, sc->sc_pdmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_pdmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_descs,
	    sizeof(struct ralink_descs));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_ndseg);

	bus_space_unmap(sc->sc_memt, sc->sc_sw_memh, sc->sc_sw_size);
	bus_space_unmap(sc->sc_memt, sc->sc_fe_memh, sc->sc_fe_size);
	bus_space_unmap(sc->sc_memt, sc->sc_sy_memh, sc->sc_sy_size);

	return 0;
}

/*
 * ralink_eth_reset
 */
static void
ralink_eth_reset(ralink_eth_softc_t *sc)
{
	RALINK_DEBUG_FUNC_ENTRY();
	uint32_t r;

	/* Reset the frame engine */
	r = sy_read(sc, RA_SYSCTL_RST);
	r |= RST_FE;
	sy_write(sc, RA_SYSCTL_RST, r);
	r ^= RST_FE;
	sy_write(sc, RA_SYSCTL_RST, r);

	/* Wait until the PDMA is quiescent */
	for (;;) {
		r = fe_read(sc, RA_FE_PDMA_GLOBAL_CFG);
		if (r & FE_PDMA_GLOBAL_CFG_RX_DMA_BUSY) {
			aprint_normal_dev(sc->sc_dev, "RX DMA BUSY\n");
			continue;
		}
		if (r & FE_PDMA_GLOBAL_CFG_TX_DMA_BUSY) {
			aprint_normal_dev(sc->sc_dev, "TX DMA BUSY\n");
			continue;
		}
		break;
	}
}

/*
 * ralink_eth_hw_init
 */
static void
ralink_eth_hw_init(ralink_eth_softc_t *sc)
{
	RALINK_DEBUG_FUNC_ENTRY();
	struct ralink_eth_txstate *txs;
	uint32_t r;
	int i;

	/* reset to a known good state */
	ralink_eth_reset(sc);

#if defined(RT3050) || defined(RT3052) || defined(MT7628)
	/* Bring the switch to a sane default state (from linux driver) */
	bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_SGC2,
	    0x00000000);
	bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_PFC1,
	    0x00405555);	/* check VLAN tag on port forward */
	bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_VLANI0,
	    0x00002001);
	bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_PVIDC0,
	    0x00001002);
	bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_PVIDC1,
	    0x00001001);
	bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_PVIDC2,
	    0x00001001);
#if defined(MT7628)
	bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_VMSC0,
	    0xffffffff);
	bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_POC0,
	    0x10007f7f);
	bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_POC2,
	    0x00007f7f);
	bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_FTC2,
	    0x0002500c);
#else
	bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_VMSC0,
	    0xffff417e);
	bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_POC0,
	    0x00007f7f);
	bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_POC2,
	    0x00007f3f);
	bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_FTC2,
	    0x00d6500c);
#endif
	bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_SWGC,
	    0x0008a301);	/* hashing algorithm=XOR48 */
				/*  aging interval=300sec  */
	bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_SOCPC,
	    0x02404040);
	bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_FPORT,
	    0x3f502b28);	/* Change polling Ext PHY Addr=0x0 */
	bus_space_write_4(sc->sc_memt, sc->sc_sw_memh, RA_ETH_SW_FPA,
	    0x00000000);

	/* do some mii magic  TODO: define these registers/bits */
	/* reduce PHY transmit power in 10Mbps mode */
	/* select local register */
	ralink_eth_mii_write(sc->sc_dev, 0, 31, 0x8000);

	for (i = 0; i < 5; i++) {
		/* set TX10 waveform coefficient */
		ralink_eth_mii_write(sc->sc_dev, i, 26, 0x1601);

		/* set TX100/TX10 AD/DA current bias */
		ralink_eth_mii_write(sc->sc_dev, i, 29, 0x7058);

		/* set TX100 slew rate control */
		ralink_eth_mii_write(sc->sc_dev, i, 30, 0x0018);
	}

	/* PHY IOT */

	/* select global register */
	ralink_eth_mii_write(sc->sc_dev, 0, 31, 0x0);

	/* tune TP_IDL tail and head waveform */
	ralink_eth_mii_write(sc->sc_dev, 0, 22, 0x052f);

	/* set TX10 signal amplitude threshold to minimum */
	ralink_eth_mii_write(sc->sc_dev, 0, 17, 0x0fe0);

	/* set squelch amplitude to higher threshold */
	ralink_eth_mii_write(sc->sc_dev, 0, 18, 0x40ba);

	/* longer TP_IDL tail length */
	ralink_eth_mii_write(sc->sc_dev, 0, 14, 0x65);

	/* select local register */
	ralink_eth_mii_write(sc->sc_dev, 0, 31, 0x8000);
#else
	/* GE1 + GigSW */
	fe_write(sc, RA_FE_MDIO_CFG1,
	    MDIO_CFG_PHY_ADDR(0x1f) |
	    MDIO_CFG_BP_EN |
	    MDIO_CFG_FORCE_CFG |
	    MDIO_CFG_SPEED(MDIO_CFG_SPEED_1000M) |
	    MDIO_CFG_FULL_DUPLEX |
	    MDIO_CFG_FC_TX |
	    MDIO_CFG_FC_RX |
	    MDIO_CFG_TX_CLK_MODE(MDIO_CFG_TX_CLK_MODE_3COM));
#endif

	/*
	 * TODO: QoS - the RT3052 has 4 TX queues for QoS; we forgo
	 * them and use a single queue for simplicity.
	 */

	/*
	 * Set up the TX/RX descriptor rings (the DMA-accessible memory
	 * was allocated at attach time).
	 */

	/* Initialize the TX queues. */
	SIMPLEQ_INIT(&sc->sc_txfreeq);
	SIMPLEQ_INIT(&sc->sc_txdirtyq);

	/* Initialize the TX descriptor ring. */
	memset(sc->sc_txdesc, 0, sizeof(sc->sc_txdesc));
	for (i = 0; i < RALINK_ETH_NUM_TX_DESC; i++) {

		sc->sc_txdesc[i].txd_info1 = TXD_LAST0 | TXD_DDONE;

		/* setup the freeq as well */
		txs = &sc->sc_txstate[i];
		txs->txs_mbuf = NULL;
		txs->txs_idx = i;
		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Flush the TX descriptors
	 *  - TODO: can we just access descriptors via KSEG1
	 *    to avoid the flush?
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_pdmamap,
	    (int)&sc->sc_txdesc - (int)sc->sc_descs, sizeof(sc->sc_txdesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Initialize the RX descriptor ring */
	memset(sc->sc_rxdesc, 0, sizeof(sc->sc_rxdesc));
	for (i = 0; i < RALINK_ETH_NUM_RX_DESC; i++) {
		if (ralink_eth_add_rxbuf(sc, i)) {
			panic("Can't allocate rx mbuf\n");
		}
	}

	/*
	 * Flush the RX descriptors
	 * - TODO: can we just access descriptors via KSEG1
	 *   to avoid the flush?
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_pdmamap,
	    (int)&sc->sc_rxdesc - (int)sc->sc_descs, sizeof(sc->sc_rxdesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Clear the PDMA state */
	r = fe_read(sc, RA_FE_PDMA_GLOBAL_CFG);
	r &= 0xff;
	fe_write(sc, RA_FE_PDMA_GLOBAL_CFG, r);
	(void) fe_read(sc, RA_FE_PDMA_GLOBAL_CFG);

#if !defined(MT7628)
	/* Setup the PDMA VLAN ID's */
	fe_write(sc, RA_FE_VLAN_ID_0001, 0x00010000);
	fe_write(sc, RA_FE_VLAN_ID_0203, 0x00030002);
	fe_write(sc, RA_FE_VLAN_ID_0405, 0x00050004);
	fe_write(sc, RA_FE_VLAN_ID_0607, 0x00070006);
	fe_write(sc, RA_FE_VLAN_ID_0809, 0x00090008);
	fe_write(sc, RA_FE_VLAN_ID_1011, 0x000b000a);
	fe_write(sc, RA_FE_VLAN_ID_1213, 0x000d000c);
	fe_write(sc, RA_FE_VLAN_ID_1415, 0x000f000e);
#endif

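	/*
	 * The ring-base registers below take physical addresses.  The
	 * descriptor memory was mapped BUS_DMA_COHERENT and is assumed
	 * to live in KSEG0 here, so MIPS_KSEG0_TO_PHYS() recovers the
	 * physical base of each ring.
	 */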
	/* Give the TX and RX rings to the chip. */
	fe_write(sc, RA_FE_PDMA_TX0_PTR,
	    htole32(MIPS_KSEG0_TO_PHYS(&sc->sc_txdesc)));
	fe_write(sc, RA_FE_PDMA_TX0_COUNT, htole32(RALINK_ETH_NUM_TX_DESC));
	fe_write(sc, RA_FE_PDMA_TX0_CPU_IDX, 0);
#if !defined(MT7628)
	fe_write(sc, RA_FE_PDMA_RESET_IDX, PDMA_RST_TX0);
#endif

	fe_write(sc, RA_FE_PDMA_RX0_PTR,
	    htole32(MIPS_KSEG0_TO_PHYS(&sc->sc_rxdesc)));
	fe_write(sc, RA_FE_PDMA_RX0_COUNT, htole32(RALINK_ETH_NUM_RX_DESC));
	fe_write(sc, RA_FE_PDMA_RX0_CPU_IDX,
	    htole32(RALINK_ETH_NUM_RX_DESC - 1));
#if !defined(MT7628)
	fe_write(sc, RA_FE_PDMA_RESET_IDX, PDMA_RST_RX0);
#endif
	fe_write(sc, RA_FE_PDMA_RX0_CPU_IDX,
	    htole32(RALINK_ETH_NUM_RX_DESC - 1));

	/* Start PDMA */
	fe_write(sc, RA_FE_PDMA_GLOBAL_CFG,
	    FE_PDMA_GLOBAL_CFG_TX_WB_DDONE |
	    FE_PDMA_GLOBAL_CFG_RX_DMA_EN |
	    FE_PDMA_GLOBAL_CFG_TX_DMA_EN |
	    FE_PDMA_GLOBAL_CFG_BURST_SZ_4);

	/* Setup the clock for the Frame Engine */
#if defined(MT7628)
	fe_write(sc, RA_FE_SDM_CON, 0x8100);
#else
	fe_write(sc, RA_FE_GLOBAL_CFG,
	    FE_GLOBAL_CFG_EXT_VLAN(0x8100) |
	    FE_GLOBAL_CFG_US_CLK(RA_BUS_FREQ / 1000000) |
	    FE_GLOBAL_CFG_L2_SPACE(0x8));
#endif

	/* Turn on all interrupts */
#if defined(MT7628)
	fe_write(sc, RA_FE_INT_MASK,
	    RA_FE_INT_RX_DONE_INT1 |
	    RA_FE_INT_RX_DONE_INT0 |
	    RA_FE_INT_TX_DONE_INT3 |
	    RA_FE_INT_TX_DONE_INT2 |
	    RA_FE_INT_TX_DONE_INT1 |
	    RA_FE_INT_TX_DONE_INT0);
#else
	fe_write(sc, RA_FE_INT_ENABLE,
	    FE_INT_RX | FE_INT_TX3 | FE_INT_TX2 | FE_INT_TX1 | FE_INT_TX0);
#endif

	/*
	 * Configure GDMA forwarding
	 * - default all packets to CPU
	 * - Turn on auto-CRC
	 */
#if 0
	fe_write(sc, RA_FE_GDMA1_FWD_CFG,
	    (FE_GDMA_FWD_CFG_DIS_TX_CRC | FE_GDMA_FWD_CFG_DIS_TX_PAD));
#endif

#if !defined(MT7628)
	fe_write(sc, RA_FE_GDMA1_FWD_CFG,
	    FE_GDMA_FWD_CFG_JUMBO_LEN(MCLBYTES/1024) |
	    FE_GDMA_FWD_CFG_STRIP_RX_CRC |
	    FE_GDMA_FWD_CFG_IP4_CRC_EN |
	    FE_GDMA_FWD_CFG_TCP_CRC_EN |
	    FE_GDMA_FWD_CFG_UDP_CRC_EN);
#endif

	/* CDMA also needs CRCs turned on */
#if !defined(MT7628)
	r = fe_read(sc, RA_FE_CDMA_CSG_CFG);
	r |= (FE_CDMA_CSG_CFG_IP4_CRC_EN | FE_CDMA_CSG_CFG_UDP_CRC_EN |
	    FE_CDMA_CSG_CFG_TCP_CRC_EN);
	fe_write(sc, RA_FE_CDMA_CSG_CFG, r);
#endif

	/* Configure Flow Control Thresholds */
#if defined(MT7628)
	sw_write(sc, RA_ETH_SW_FCT0,
	    RA_ETH_SW_FCT0_FC_RLS_TH(0xc8) |
	    RA_ETH_SW_FCT0_FC_SET_TH(0xa0) |
	    RA_ETH_SW_FCT0_DROP_RLS_TH(0x78) |
	    RA_ETH_SW_FCT0_DROP_SET_TH(0x50));
	sw_write(sc, RA_ETH_SW_FCT1,
	    RA_ETH_SW_FCT1_PORT_TH(0x14));
#elif defined(RT3883)
	fe_write(sc, RA_FE_PSE_FQ_CFG,
	    FE_PSE_FQ_MAX_COUNT(0xff) |
	    FE_PSE_FQ_FC_RELEASE(0x90) |
	    FE_PSE_FQ_FC_ASSERT(0x80));
#else
	fe_write(sc, RA_FE_PSE_FQ_CFG,
	    FE_PSE_FQ_MAX_COUNT(0x80) |
	    FE_PSE_FQ_FC_RELEASE(0x50) |
	    FE_PSE_FQ_FC_ASSERT(0x40));
#endif

#ifdef RALINK_ETH_DEBUG
#ifdef RA_FE_MDIO_CFG1
	printf("FE_MDIO_CFG1: 0x%08x\n", fe_read(sc, RA_FE_MDIO_CFG1));
#endif
#ifdef RA_FE_MDIO_CFG2
	printf("FE_MDIO_CFG2: 0x%08x\n", fe_read(sc, RA_FE_MDIO_CFG2));
#endif
	printf("FE_PDMA_TX0_PTR: %08x\n", fe_read(sc, RA_FE_PDMA_TX0_PTR));
	printf("FE_PDMA_TX0_COUNT: %08x\n",
	    fe_read(sc, RA_FE_PDMA_TX0_COUNT));
	printf("FE_PDMA_TX0_CPU_IDX: %08x\n",
	    fe_read(sc, RA_FE_PDMA_TX0_CPU_IDX));
	printf("FE_PDMA_TX0_DMA_IDX: %08x\n",
	    fe_read(sc, RA_FE_PDMA_TX0_DMA_IDX));
	printf("FE_PDMA_RX0_PTR: %08x\n", fe_read(sc, RA_FE_PDMA_RX0_PTR));
	printf("FE_PDMA_RX0_COUNT: %08x\n",
	    fe_read(sc, RA_FE_PDMA_RX0_COUNT));
	printf("FE_PDMA_RX0_CPU_IDX: %08x\n",
	    fe_read(sc, RA_FE_PDMA_RX0_CPU_IDX));
	printf("FE_PDMA_RX0_DMA_IDX: %08x\n",
	    fe_read(sc, RA_FE_PDMA_RX0_DMA_IDX));
	printf("FE_PDMA_GLOBAL_CFG: %08x\n",
	    fe_read(sc, RA_FE_PDMA_GLOBAL_CFG));
#ifdef RA_FE_GLOBAL_CFG
	printf("FE_GLOBAL_CFG: %08x\n", fe_read(sc, RA_FE_GLOBAL_CFG));
#endif
#ifdef RA_FE_GDMA1_FWD_CFG
	printf("FE_GDMA1_FWD_CFG: %08x\n",
	    fe_read(sc, RA_FE_GDMA1_FWD_CFG));
#endif
#ifdef RA_FE_CDMA_CSG_CFG
	printf("FE_CDMA_CSG_CFG: %08x\n", fe_read(sc, RA_FE_CDMA_CSG_CFG));
#endif
#ifdef RA_FE_PSE_FQ_CFG
	printf("FE_PSE_FQ_CFG: %08x\n", fe_read(sc, RA_FE_PSE_FQ_CFG));
#endif
#endif

	/* Force PSE Reset to get everything finalized */
#if defined(MT7628)
#else
	fe_write(sc, RA_FE_GLOBAL_RESET, FE_GLOBAL_RESET_PSE);
	fe_write(sc, RA_FE_GLOBAL_RESET, 0);
#endif
}

/*
 * ralink_eth_init
 */
static int
ralink_eth_init(struct ifnet *ifp)
{
	RALINK_DEBUG_FUNC_ENTRY();
	ralink_eth_softc_t * const sc = ifp->if_softc;
	int error;

	error = ralink_eth_enable(sc);
	if (!error) {
		/* Note that the interface is now running. */
		ifp->if_flags |= IFF_RUNNING;
	}

	return error;
}

/*
 * ralink_eth_rxdrain
 *
 *  Drain the receive queue.
 */
static void
ralink_eth_rxdrain(ralink_eth_softc_t *sc)
{
	RALINK_DEBUG_FUNC_ENTRY();

	for (int i = 0; i < RALINK_ETH_NUM_RX_DESC; i++) {
		struct ralink_eth_rxstate *rxs = &sc->sc_rxstate[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

/*
 * ralink_eth_stop
 */
static void
ralink_eth_stop(struct ifnet *ifp, int disable)
{
	RALINK_DEBUG_FUNC_ENTRY();
	ralink_eth_softc_t * const sc = ifp->if_softc;

	ralink_eth_disable(sc);

	/* Mark the interface down and cancel the watchdog timer.  */
	ifp->if_flags &= ~IFF_RUNNING;
	ifp->if_timer = 0;
}

/*
 * ralink_eth_add_rxbuf
 */
static int
ralink_eth_add_rxbuf(ralink_eth_softc_t *sc, int idx)
{
	RALINK_DEBUG_FUNC_ENTRY();
	struct ralink_eth_rxstate * const rxs = &sc->sc_rxstate[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		printf("MGETHDR failed\n");
		sc->sc_evcnt_add_rxbuf_hdr_fail.ev_count++;
		return ENOBUFS;
	}

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		printf("MCLGET failed\n");
		sc->sc_evcnt_add_rxbuf_mcl_fail.ev_count++;
		return ENOBUFS;
	}

	m->m_data = m->m_ext.ext_buf;
	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "can't load rx DMA map %d, "
		    "error=%d\n", idx, error);
		panic(__func__);  /* XXX */
	}

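	/*
	 * Point the descriptor RALINK_ETHER_ALIGN (2) bytes into the
	 * cluster so the IP header behind the 14-byte Ethernet header
	 * lands on a 4-byte boundary; rxintr advances m_data by the
	 * same amount before passing the packet up.
	 */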
	sc->sc_rxdesc[idx].data_ptr = MIPS_KSEG0_TO_PHYS(
	    rxs->rxs_dmamap->dm_segs[0].ds_addr + RALINK_ETHER_ALIGN);
	sc->sc_rxdesc[idx].rxd_info1 = RXD_LAST0;

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	return 0;
}


/*
 * ralink_eth_start
 */
static void
ralink_eth_start(struct ifnet *ifp)
{
	RALINK_DEBUG_FUNC_ENTRY();
	ralink_eth_softc_t * const sc = ifp->if_softc;
	struct mbuf *m0, *m = NULL;
	struct ralink_eth_txstate *txs;
	bus_dmamap_t dmamap;
	int tx_cpu_idx;
	int error;
	int s;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	s = splnet();

	tx_cpu_idx = fe_read(sc, RA_FE_PDMA_TX0_CPU_IDX);

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available
	 * transmit descriptors.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) != NULL) {
		/* Grab a packet off the queue.  */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		dmamap = txs->txs_dmamap;
		m = NULL;	/* set only if we copy m0 to a new mbuf */

		/* pad runt packets with zeros up to the 64-byte minimum */
		if (m0->m_pkthdr.len < RALINK_MIN_BUF) {
			int padlen = RALINK_MIN_BUF - m0->m_pkthdr.len;
			m_copyback(m0, m0->m_pkthdr.len, padlen,
			    sc->ralink_zero_buf);
			/* TODO: check that m_copyback() succeeded */
		}

		/*
		 * If the mbuf chain doesn't fit the DMA map (too many
		 * segments or misaligned), copy it into a fresh mbuf.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {

			/* Allocate a new mbuf for re-alignment */
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				aprint_error_dev(sc->sc_dev,
				    "unable to allocate aligned Tx mbuf\n");
				break;
			}
			MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					aprint_error_dev(sc->sc_dev,
					    "unable to allocate Tx cluster\n");
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m,
			    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
			if (error) {
				aprint_error_dev(sc->sc_dev,
				    "unable to load Tx buffer error=%d\n",
				    error);
				m_freem(m);
				break;
			}
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		/* did we copy the buffer out already? */
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/* Initialize the transmit descriptor */
		sc->sc_txdesc[tx_cpu_idx].data_ptr0 =
		    MIPS_KSEG0_TO_PHYS(dmamap->dm_segs[0].ds_addr);
		sc->sc_txdesc[tx_cpu_idx].txd_info1 =
		    TXD_LEN0(dmamap->dm_segs[0].ds_len) | TXD_LAST0;
		sc->sc_txdesc[tx_cpu_idx].txd_info2 = TXD_QN(3) |
		    TXD_PN(TXD_PN_GDMA1) | TXD_VEN |
		    // TXD_VIDX(pt->vlan_id) |
		    TXD_TCP_EN | TXD_UDP_EN | TXD_IP_EN;

		RALINK_DEBUG(RALINK_DEBUG_REG, "+tx(%d) 0x%08x: 0x%08x\n",
		    tx_cpu_idx, (int)&sc->sc_txdesc[tx_cpu_idx].data_ptr0,
		    sc->sc_txdesc[tx_cpu_idx].data_ptr0);
		RALINK_DEBUG(RALINK_DEBUG_REG, "+tx(%d) 0x%08x: 0x%08x\n",
		    tx_cpu_idx, (int)&sc->sc_txdesc[tx_cpu_idx].txd_info1,
		    sc->sc_txdesc[tx_cpu_idx].txd_info1);
		RALINK_DEBUG(RALINK_DEBUG_REG, "+tx(%d) 0x%08x: 0x%08x\n",
		    tx_cpu_idx, (int)&sc->sc_txdesc[tx_cpu_idx].data_ptr1,
		    sc->sc_txdesc[tx_cpu_idx].data_ptr1);
		RALINK_DEBUG(RALINK_DEBUG_REG, "+tx(%d) 0x%08x: 0x%08x\n",
		    tx_cpu_idx, (int)&sc->sc_txdesc[tx_cpu_idx].txd_info2,
		    sc->sc_txdesc[tx_cpu_idx].txd_info2);

		/* sync the descriptor we're using. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_pdmamap,
		    (int)&sc->sc_txdesc[tx_cpu_idx] - (int)sc->sc_descs,
		    sizeof(struct ralink_tx_desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		txs->txs_mbuf = m0;
		sc->sc_pending_tx++;
		if (txs->txs_idx != tx_cpu_idx) {
			panic("txs_idx doesn't match %d != %d\n",
			    txs->txs_idx, tx_cpu_idx);
		}

		SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
		SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);

		/* Pass the packet to any BPF listeners. */
		bpf_mtap(ifp, m0, BPF_D_OUT);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;

		tx_cpu_idx = (tx_cpu_idx + 1) % RALINK_ETH_NUM_TX_DESC;

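		/*
		 * Advancing the CPU index past this descriptor hands
		 * it to the PDMA engine and starts (or continues) the
		 * transmit DMA.
		 */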
		/* Write back the tx_cpu_idx */
		fe_write(sc, RA_FE_PDMA_TX0_CPU_IDX, tx_cpu_idx);
	}

	splx(s);
}

/*
 * ralink_eth_watchdog
 *
 *	Watchdog timer handler.
 */
static void
ralink_eth_watchdog(struct ifnet *ifp)
{
	RALINK_DEBUG_FUNC_ENTRY();
	ralink_eth_softc_t * const sc = ifp->if_softc;
	bool doing_transmit;

	sc->sc_evcnt_watchdog.ev_count++;
	doing_transmit = !SIMPLEQ_EMPTY(&sc->sc_txdirtyq);

	if (doing_transmit) {
		RALINK_DEBUG(RALINK_DEBUG_ERROR, "%s: transmit timeout\n",
		    ifp->if_xname);
		if_statinc(ifp, if_oerrors);
		sc->sc_evcnt_wd_tx.ev_count++;
	} else {
		RALINK_DEBUG(RALINK_DEBUG_ERROR,
		    "%s: spurious watchdog timeout\n", ifp->if_xname);
		sc->sc_evcnt_wd_spurious.ev_count++;
		return;
	}

	sc->sc_evcnt_wd_reactivate.ev_count++;
	const int s = splnet();
	/* deactivate the active partitions, retaining the active information */
	ralink_eth_disable(sc);
	ralink_eth_enable(sc);
	splx(s);

	/* Try to get more packets going. */
	ralink_eth_start(ifp);
}

/*
 * ralink_eth_ioctl
 *
 *	Handle control requests from the operator.
 */
static int
ralink_eth_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	RALINK_DEBUG_FUNC_ENTRY();
	struct ifdrv * const ifd = (struct ifdrv *) data;
	ralink_eth_softc_t * const sc = ifp->if_softc;
	int s, error = 0;

	RALINK_DEBUG(RALINK_DEBUG_INFO, "ifp: %p  cmd: %lu  data: %p\n",
		ifp, cmd, data);

	s = splnet();

	switch (cmd) {
	case SIOCSDRVSPEC:
		switch (ifd->ifd_cmd) {
#if 0
		case ETH_SWITCH_CMD_PORT_MODE:
			/* len parameter is the mode */
			pt->mode = (int) ifd->ifd_len;
			ralink_eth_configure_switch(pt->sc_reth);
			break;
#endif
		default:
			error = EINVAL;
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING) {
				/*
				 * Multicast list has changed.  Set the
				 * hardware filter accordingly.
				 */
				RALINK_DEBUG(RALINK_DEBUG_INFO, "TODO!!!");
#if 0
				ralink_eth_filter_setup(sc);
#endif
			}
			error = 0;
		}
		break;
	}

	splx(s);

	/* Try to get more packets going. */
	if (sc->sc_ih != NULL)
		ralink_eth_start(ifp);

	return error;
}

/*
 * ralink_eth_intr
 */
static int
ralink_eth_intr(void *arg)
{
	RALINK_DEBUG_FUNC_ENTRY();
	ralink_eth_softc_t * const sc = arg;
	u_int n;

	for (n = 0;; n = 1) {
		uint32_t status = fe_read(sc, RA_FE_INT_STATUS);
		fe_write(sc, RA_FE_INT_STATUS, ~0);
		RALINK_DEBUG(RALINK_DEBUG_REG, "%s() status: 0x%08x\n",
		    __func__, status);
#if defined(MT7628)
		if ((status & (RA_FE_INT_RX_DONE_INT1 | RA_FE_INT_RX_DONE_INT0 |
		    RA_FE_INT_TX_DONE_INT3 | RA_FE_INT_TX_DONE_INT2 |
		    RA_FE_INT_TX_DONE_INT1 | RA_FE_INT_TX_DONE_INT0)) == 0)
			break;

		if (status & (RA_FE_INT_RX_DONE_INT1 | RA_FE_INT_RX_DONE_INT0))
			ralink_eth_rxintr(sc);

		if (status & (RA_FE_INT_TX_DONE_INT3 | RA_FE_INT_TX_DONE_INT2 |
		    RA_FE_INT_TX_DONE_INT1 | RA_FE_INT_TX_DONE_INT0))
			ralink_eth_txintr(sc);
#else
		if ((status & (FE_INT_RX | FE_INT_TX0)) == 0)
			break;

		if (status & FE_INT_RX)
			ralink_eth_rxintr(sc);

		if (status & FE_INT_TX0)
			ralink_eth_txintr(sc);
#endif
	}

	if (n == 0) {
		sc->sc_evcnt_spurious_intr.ev_count++;
		return 0;
	}

	/* Try to get more packets going. */
	if_schedule_deferred_start(&sc->sc_ethercom.ec_if);

	return 1;
}

/*
 * ralink_eth_rxintr
 */
static void
ralink_eth_rxintr(ralink_eth_softc_t *sc)
{
	RALINK_DEBUG_FUNC_ENTRY();
	struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
	struct ralink_eth_rxstate *rxs;
	struct mbuf *m;
	int len;
	int rx_cpu_idx;

	KASSERT(curcpu()->ci_cpl >= IPL_NET);
	sc->sc_evcnt_rxintr.ev_count++;
	rx_cpu_idx = fe_read(sc, RA_FE_PDMA_RX0_CPU_IDX);

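	/*
	 * The CPU index register holds the last descriptor already
	 * returned to the DMA engine, so the next completed packet
	 * (if any) is one slot ahead; hence the pre-increment below.
	 */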
	for (;;) {
		rx_cpu_idx = (rx_cpu_idx + 1) % RALINK_ETH_NUM_RX_DESC;

		rxs = &sc->sc_rxstate[rx_cpu_idx];

		bus_dmamap_sync(sc->sc_dmat, sc->sc_pdmamap,
		    (int)&sc->sc_rxdesc[rx_cpu_idx] - (int)sc->sc_descs,
		    sizeof(struct ralink_rx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		RALINK_DEBUG(RALINK_DEBUG_REG, "rx(%d) 0x%08x: 0x%08x\n",
		    rx_cpu_idx, (int)&sc->sc_rxdesc[rx_cpu_idx].data_ptr,
		    sc->sc_rxdesc[rx_cpu_idx].data_ptr);
		RALINK_DEBUG(RALINK_DEBUG_REG, "rx(%d) 0x%08x: 0x%08x\n",
		    rx_cpu_idx, (int)&sc->sc_rxdesc[rx_cpu_idx].rxd_info1,
		    sc->sc_rxdesc[rx_cpu_idx].rxd_info1);
		RALINK_DEBUG(RALINK_DEBUG_REG, "rx(%d) 0x%08x: 0x%08x\n",
		    rx_cpu_idx, (int)&sc->sc_rxdesc[rx_cpu_idx].unused,
		    sc->sc_rxdesc[rx_cpu_idx].unused);
		RALINK_DEBUG(RALINK_DEBUG_REG, "rx(%d) 0x%08x: 0x%08x\n",
		    rx_cpu_idx, (int)&sc->sc_rxdesc[rx_cpu_idx].rxd_info2,
		    sc->sc_rxdesc[rx_cpu_idx].rxd_info2);

		if (!(sc->sc_rxdesc[rx_cpu_idx].rxd_info1 & RXD_DDONE))
			break;

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.
		 * Note the chip includes the CRC with every packet.
		 */
		len = RXD_LEN0(sc->sc_rxdesc[rx_cpu_idx].rxd_info1);

		RALINK_DEBUG(RALINK_DEBUG_REG, "rx(%d) packet rx %d bytes\n",
		    rx_cpu_idx, len);

		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = rxs->rxs_mbuf;
		if (ralink_eth_add_rxbuf(sc, rx_cpu_idx) != 0)
			break;
		m->m_data += RALINK_ETHER_ALIGN;
		m->m_pkthdr.len = m->m_len = len;

#ifdef RALINK_ETH_DEBUG
 {
		struct ether_header *eh = mtod(m, struct ether_header *);
		printf("rx: eth_dst: %s ", ether_sprintf(eh->ether_dhost));
		printf("rx: eth_src: %s type: 0x%04x\n",
		    ether_sprintf(eh->ether_shost), ntohs(eh->ether_type));
		printf("0x14: %08x\n", *(volatile unsigned int *)(0xb0110014));
		printf("0x98: %08x\n", *(volatile unsigned int *)(0xb0110098));

		unsigned char *s = mtod(m, unsigned char *);
		for (int j = 0; j < 32; j++)
			printf("%02x%c", *(s + j),
			    (j == 15 || j == 31) ? '\n' : ' ');
 }
#endif

		/*
		 * claim the buffer here since we can't do it at
		 * allocation time due to the SW partitions
		 */
		MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);

		/* push it up the interface */
		m_set_rcvif(m, ifp);

#ifdef RALINK_ETH_DEBUG
 {
		struct ether_header *eh = mtod(m, struct ether_header *);
		printf("rx: eth_dst: %s ", ether_sprintf(eh->ether_dhost));
		printf("rx: eth_src: %s type: 0x%04x\n",
		    ether_sprintf(eh->ether_shost), ntohs(eh->ether_type));
		printf("0x14: %08x\n", *(volatile unsigned int *)(0xb0110014));
		printf("0x98: %08x\n", *(volatile unsigned int *)(0xb0110098));

		unsigned char *s = mtod(m, unsigned char *);
		for (int j = 0; j < 32; j++)
			printf("%02x%c", *(s + j),
			    (j == 15 || j == 31) ? '\n' : ' ');
 }
#endif

		/*
		 * XXX: M_CSUM_TCPv4 and M_CSUM_UDPv4 do not currently work when
		 * using PF's ROUTETO option for load balancing.
		 */
		m->m_pkthdr.csum_flags |= M_CSUM_IPv4;

		/* Pass it on. */
		sc->sc_evcnt_input.ev_count++;
		if_percpuq_enqueue(ifp->if_percpuq, m);

		fe_write(sc, RA_FE_PDMA_RX0_CPU_IDX, rx_cpu_idx);
	}
}

/*
 * ralink_eth_txintr
 */
static void
ralink_eth_txintr(ralink_eth_softc_t *sc)
{
	RALINK_DEBUG_FUNC_ENTRY();
	struct ralink_eth_txstate *txs;

	KASSERT(curcpu()->ci_cpl >= IPL_NET);
	sc->sc_evcnt_txintr.ev_count++;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
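	/*
	 * The engine completes descriptors in ring order, so we can
	 * stop at the first one it has not yet marked done.
	 */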
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_pdmamap,
		    (int)&sc->sc_txdesc[txs->txs_idx] - (int)sc->sc_descs,
		    sizeof(struct ralink_tx_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		RALINK_DEBUG(RALINK_DEBUG_REG, "-tx(%d) 0x%08x: 0x%08x\n",
		    txs->txs_idx, (int)&sc->sc_txdesc[txs->txs_idx].data_ptr0,
		    sc->sc_txdesc[txs->txs_idx].data_ptr0);
		RALINK_DEBUG(RALINK_DEBUG_REG, "-tx(%d) 0x%08x: 0x%08x\n",
		    txs->txs_idx, (int)&sc->sc_txdesc[txs->txs_idx].txd_info1,
		    sc->sc_txdesc[txs->txs_idx].txd_info1);
		RALINK_DEBUG(RALINK_DEBUG_REG, "-tx(%d) 0x%08x: 0x%08x\n",
		    txs->txs_idx, (int)&sc->sc_txdesc[txs->txs_idx].data_ptr1,
		    sc->sc_txdesc[txs->txs_idx].data_ptr1);
		RALINK_DEBUG(RALINK_DEBUG_REG, "-tx(%d) 0x%08x: 0x%08x\n",
		    txs->txs_idx, (int)&sc->sc_txdesc[txs->txs_idx].txd_info2,
		    sc->sc_txdesc[txs->txs_idx].txd_info2);

		/* we're finished if the current tx isn't done */
		if (!(sc->sc_txdesc[txs->txs_idx].txd_info1 & TXD_DDONE))
			break;

		RALINK_DEBUG(RALINK_DEBUG_REG, "-tx(%d) transmitted\n",
		    txs->txs_idx);

		SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, 0,
		    txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;

		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		struct ifnet *ifp = &sc->sc_ethercom.ec_if;
		if_statinc(ifp, if_opackets);
		sc->sc_evcnt_output.ev_count++;

		if (--sc->sc_pending_tx == 0)
			ifp->if_timer = 0;
	}
}

/*
 * ralink_eth_mdio_enable
 */
#if defined(RT3050) || defined(RT3052)
static void
ralink_eth_mdio_enable(ralink_eth_softc_t *sc, bool enable)
{
	uint32_t data = sy_read(sc, RA_SYSCTL_GPIOMODE);

	if (enable)
		data &= ~GPIOMODE_MDIO;
	else
		data |= GPIOMODE_MDIO;

	sy_write(sc, RA_SYSCTL_GPIOMODE, data);
}
#else
#define ralink_eth_mdio_enable(sc, enable)
#endif

/*
 * ralink_eth_mii_statchg
 */
static void
ralink_eth_mii_statchg(struct ifnet *ifp)
{
#if 0
	ralink_eth_softc_t * const sc = ifp->if_softc;

#endif
}

/*
 * ralink_eth_mii_tick
 *
 *	One second timer, used to tick the MIIs.
 */
static void
ralink_eth_mii_tick(void *arg)
{
	ralink_eth_softc_t * const sc = arg;

	const int s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	callout_reset(&sc->sc_tick_callout, hz, ralink_eth_mii_tick, sc);
}

/*
 * ralink_eth_mii_read
 */
static int
ralink_eth_mii_read(device_t self, int phy_addr, int phy_reg, uint16_t *val)
{
	ralink_eth_softc_t *sc = device_private(self);
	KASSERT(sc != NULL);
#if 0
	printf("%s() phy_addr: %d  phy_reg: %d\n", __func__, phy_addr, phy_reg);
#endif
#if defined(RT3050) || defined(RT3052) || defined(MT7628)
	if (phy_addr > 5)
		return -1;
#endif

	/* Enable MDIO via the GPIO-mode register; disabled again on exit. */
	ralink_eth_mdio_enable(sc, true);

	/*
	 * make sure previous read operation is complete
	 * TODO: timeout (linux uses jiffies to measure 5 seconds)
	 */
	for (;;) {
		/* rd_rdy: read operation is complete */
#if defined(RT3050) || defined(RT3052) || defined(MT7628)
		if ((sw_read(sc, RA_ETH_SW_PCTL1) & PCTL1_RD_DONE) == 0)
			break;
#else
		if ((fe_read(sc, RA_FE_MDIO_ACCESS) & MDIO_ACCESS_TRG) == 0)
			break;
#endif
	}

#if defined(RT3050) || defined(RT3052) || defined(MT7628)
	sw_write(sc, RA_ETH_SW_PCTL0,
	    PCTL0_RD_CMD | PCTL0_REG(phy_reg) | PCTL0_ADDR(phy_addr));
#else
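	/*
	 * Two writes: the first latches the address bits, the second
	 * repeats them with MDIO_ACCESS_TRG set to start the cycle
	 * (the trigger appears to be edge-sensitive).
	 */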
	fe_write(sc, RA_FE_MDIO_ACCESS,
	    MDIO_ACCESS_PHY_ADDR(phy_addr) | MDIO_ACCESS_REG(phy_reg));
	fe_write(sc, RA_FE_MDIO_ACCESS,
	    MDIO_ACCESS_PHY_ADDR(phy_addr) | MDIO_ACCESS_REG(phy_reg) |
	    MDIO_ACCESS_TRG);
#endif

	/*
	 * make sure read operation is complete
	 * TODO: timeout (linux uses jiffies to measure 5 seconds)
	 */
	for (;;) {
#if defined(RT3050) || defined(RT3052) || defined(MT7628)
		if ((sw_read(sc, RA_ETH_SW_PCTL1) & PCTL1_RD_DONE) != 0) {
			*val = PCTL1_RD_VAL(
			    sw_read(sc, RA_ETH_SW_PCTL1));
			ralink_eth_mdio_enable(sc, false);
			return 0;
		}
#else
		if ((fe_read(sc, RA_FE_MDIO_ACCESS) & MDIO_ACCESS_TRG) == 0) {
			*val = MDIO_ACCESS_DATA(
			    fe_read(sc, RA_FE_MDIO_ACCESS));
			ralink_eth_mdio_enable(sc, false);
			return 0;
		}
#endif
	}
}

/*
 * ralink_eth_mii_write
 */
static int
ralink_eth_mii_write(device_t self, int phy_addr, int phy_reg, uint16_t val)
{
	ralink_eth_softc_t *sc = device_private(self);
	KASSERT(sc != NULL);
#if 0
	printf("%s() phy_addr: %d  phy_reg: %d  val: 0x%04x\n",
	    __func__, phy_addr, phy_reg, val);
#endif
	ralink_eth_mdio_enable(sc, true);

	/*
	 * make sure previous write operation is complete
	 * TODO: timeout (linux uses jiffies to measure 5 seconds)
	 */
	for (;;) {
#if defined(RT3050) || defined(RT3052) || defined(MT7628)
		if ((sw_read(sc, RA_ETH_SW_PCTL1) & PCTL1_RD_DONE) == 0)
			break;
#else
		if ((fe_read(sc, RA_FE_MDIO_ACCESS) & MDIO_ACCESS_TRG) == 0)
			break;
#endif
	}

#if defined(RT3050) || defined(RT3052) || defined(MT7628)
	sw_write(sc, RA_ETH_SW_PCTL0,
	    PCTL0_WR_CMD | PCTL0_WR_VAL(val) | PCTL0_REG(phy_reg) |
	    PCTL0_ADDR(phy_addr));
#else
	fe_write(sc, RA_FE_MDIO_ACCESS,
	    MDIO_ACCESS_WR | MDIO_ACCESS_PHY_ADDR(phy_addr) |
	    MDIO_ACCESS_REG(phy_reg) | MDIO_ACCESS_DATA(val));
	fe_write(sc, RA_FE_MDIO_ACCESS,
	    MDIO_ACCESS_WR | MDIO_ACCESS_PHY_ADDR(phy_addr) |
	    MDIO_ACCESS_REG(phy_reg) | MDIO_ACCESS_DATA(val) |
	    MDIO_ACCESS_TRG);
#endif
	/* make sure write operation is complete */
	for (;;) {
#if defined(RT3050) || defined(RT3052) || defined(MT7628)
		if ((sw_read(sc, RA_ETH_SW_PCTL1) & PCTL1_WR_DONE) != 0) {
			ralink_eth_mdio_enable(sc, false);
			return 0;
		}
#else
		if ((fe_read(sc, RA_FE_MDIO_ACCESS) & MDIO_ACCESS_TRG) == 0) {
			ralink_eth_mdio_enable(sc, false);
			return 0;
		}
#endif
	}
}
