/* $NetBSD: dwc_gmac.c,v 1.87 2024/06/16 17:11:11 skrll Exp $ */

/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 cores and others.
 *
 * Real documentation does not seem to be available; the marketing
 * product documents can be found here:
 *
 *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */
#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: dwc_gmac.c,v 1.87 2024/06/16 17:11:11 skrll Exp $");

/* #define	DWC_GMAC_DEBUG	1 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/cprng.h>
#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int, uint16_t *);
static int dwc_gmac_miibus_write_reg(device_t, int, int, uint16_t);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *, uint8_t[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *, int, int, int);
static int dwc_gmac_init(struct ifnet *);
static int dwc_gmac_init_locked(struct ifnet *);
static void dwc_gmac_stop(struct ifnet *, int);
static void dwc_gmac_stop_locked(struct ifnet *, int);
static void dwc_gmac_start(struct ifnet *);
static void dwc_gmac_start_locked(struct ifnet *);
static int dwc_gmac_queue(struct dwc_gmac_softc *, struct mbuf *);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *);
static void dwc_gmac_setmulti(struct dwc_gmac_softc *);
static int dwc_gmac_ifflags_cb(struct ethercom *);
static void dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
static int  dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *, int);
static uint32_t dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
static int  dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *, int);
static uint32_t dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
static int  dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *);

static const struct dwc_gmac_desc_methods desc_methods_standard = {
	.tx_init_flags = dwc_gmac_desc_std_tx_init_flags,
	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.tx_set_len = dwc_gmac_desc_std_set_len,
	.tx_set_first_frag = dwc_gmac_desc_std_tx_set_first_frag,
	.tx_set_last_frag = dwc_gmac_desc_std_tx_set_last_frag,
	.rx_init_flags = dwc_gmac_desc_std_rx_init_flags,
	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.rx_set_len = dwc_gmac_desc_std_set_len,
	.rx_get_len = dwc_gmac_desc_std_get_len,
	.rx_has_error = dwc_gmac_desc_std_rx_has_error
};

static const struct dwc_gmac_desc_methods desc_methods_enhanced = {
	.tx_init_flags = dwc_gmac_desc_enh_tx_init_flags,
	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.tx_set_len = dwc_gmac_desc_enh_set_len,
	.tx_set_first_frag = dwc_gmac_desc_enh_tx_set_first_frag,
	.tx_set_last_frag = dwc_gmac_desc_enh_tx_set_last_frag,
	.rx_init_flags = dwc_gmac_desc_enh_rx_init_flags,
	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.rx_set_len = dwc_gmac_desc_enh_set_len,
	.rx_get_len = dwc_gmac_desc_enh_get_len,
	.rx_has_error = dwc_gmac_desc_enh_rx_has_error
};

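/*
 * Ring layout: the RX descriptors occupy the first AWGE_RX_RING_COUNT
 * slots of the shared descriptor area; the TX descriptors follow
 * directly behind them.  The wrap-around in TX_NEXT/RX_NEXT relies on
 * both ring counts being powers of two.
 */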
#define	TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT + (N)) \
				    * sizeof(struct dwc_gmac_dev_dmadesc))
#define	TX_NEXT(N)		(((N) + 1) & (AWGE_TX_RING_COUNT - 1))

#define	RX_DESC_OFFSET(N)	((N) * sizeof(struct dwc_gmac_dev_dmadesc))
#define	RX_NEXT(N)		(((N) + 1) & (AWGE_RX_RING_COUNT - 1))

#define	GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE | GMAC_DMA_INT_RIE | \
				GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE | \
				GMAC_DMA_INT_FBE | GMAC_DMA_INT_UNE)

#define	GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE | GMAC_DMA_INT_ERE | \
				GMAC_DMA_INT_FBE |	\
				GMAC_DMA_INT_RWE | GMAC_DMA_INT_RUE | \
				GMAC_DMA_INT_UNE | GMAC_DMA_INT_OVE | \
				GMAC_DMA_INT_TJE)

#define	AWIN_DEF_MAC_INTRMASK	\
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG |	\
	AWIN_GMAC_MAC_INT_LINKCHG)

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *);
static void dwc_dump_and_abort(struct dwc_gmac_softc *, const char *);
static void dwc_dump_status(struct dwc_gmac_softc *);
static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *, uint32_t);
#endif

int
dwc_gmac_attach(struct dwc_gmac_softc *sc, int phy_id, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi, ver, hwft;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			/* fake a locally administered MAC address */
			maclo = 0x00f2 | (cprng_strong32() << 16);
			machi = cprng_strong32();
		}

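		/*
		 * The ADDR0 registers hold the station address in
		 * little-endian byte order (cf. dwc_gmac_write_hwaddr).
		 */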
		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	ver = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_VERSION);
	aprint_normal_dev(sc->sc_dev, "Core version: %08x\n", ver);

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return ENXIO;	/* not much to clean up, we have not attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	hwft = 0;
	if (ver >= 0x35) {
		hwft = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_HWFEATURES);
		aprint_normal_dev(sc->sc_dev,
		    "HW feature mask: %x\n", hwft);
	}

	if (sizeof(bus_addr_t) > 4) {
		int error = bus_dmatag_subregion(sc->sc_dmat, 0, __MASK(32),
		    &sc->sc_dmat, BUS_DMA_WAITOK);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "failed to create DMA subregion\n");
			return ENOMEM;
		}
	}

	if (hwft & GMAC_DMA_FEAT_ENHANCED_DESC) {
		aprint_normal_dev(sc->sc_dev,
		    "Using enhanced descriptor format\n");
		sc->sc_descm = &desc_methods_enhanced;
	} else {
		sc->sc_descm = &desc_methods_standard;
	}
	if (hwft & GMAC_DMA_FEAT_RMON) {
		uint32_t val;

		/* Mask all MMC interrupts */
		val = 0xffffffff;
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    GMAC_MMC_RX_INT_MSK, val);
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    GMAC_MMC_TX_INT_MSK, val);
	}

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef DWCGMAC_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, phy_id, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
	}

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Ready, attach interface
	 */
	if_initialize(ifp);
	sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);
	if_register(ifp);
	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	/*
	 * Enable interrupts
	 */
	mutex_enter(sc->sc_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);
	mutex_exit(sc->sc_lock);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_free_dma_rings(sc);
	mutex_destroy(&sc->sc_mdio_lock);

	return ENXIO;
}

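/*
 * Soft-reset the core: set GMAC_BUSMODE_RESET and poll until the
 * device clears the bit again.  The loop below gives the chip up to
 * 300 ms (30000 * 10 us) before we declare failure.
 */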
static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
	    | GMAC_BUSMODE_RESET);
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_BUSMODE) & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t hi, lo;

	hi = enaddr[4] | (enaddr[5] << 8);
	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | ((uint32_t)enaddr[3] << 24);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
}

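/*
 * MDIO access: PHY address, register number and clock divider are
 * packed into MIIADDR together with GMAC_MII_BUSY (plus GMAC_MII_WRITE
 * for writes).  The core clears the busy bit when the transaction has
 * completed; we poll for up to 10 ms (1000 * 10 us).
 */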
static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg, uint16_t *val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			*val = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	if (cnt >= 1000)
		return ETIMEDOUT;

	return 0;
}

static int
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, uint16_t val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	if (cnt >= 1000)
		return ETIMEDOUT;

	return 0;
}

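/*
 * Allocate one MCLBYTES-sized cluster per RX descriptor, load it for
 * DMA and hand the descriptor over to the device.  The descriptors are
 * chained through ddesc_next, so the ring wraps in hardware.
 */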
static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		data->rd_m->m_len = data->rd_m->m_pkthdr.len
		    = data->rd_m->m_ext.ext_size;
		if (data->rd_m->m_len > AWGE_MAX_PACKET) {
			data->rd_m->m_len = data->rd_m->m_pkthdr.len
			    = AWGE_MAX_PACKET;
		}

		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
		    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
		sc->sc_descm->rx_set_owned_by_dev(desc);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	int i;

	mutex_enter(&ring->r_mtx);
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];
		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
		sc->sc_descm->rx_set_owned_by_dev(desc);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	mutex_exit(&ring->r_mtx);
}

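/*
 * All RX and TX descriptors live in a single contiguous DMA allocation:
 * the first AWGE_RX_RING_COUNT descriptors form the RX ring, the
 * remaining ones the TX ring (cf. TX_DESC_OFFSET above).
 */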
static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
		sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give the first AWGE_RX_RING_COUNT descriptors to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and the remaining ones to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT * sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    * TX_NEXT(i));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

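/*
 * Sync the TX descriptor range [start, end); when the range wraps past
 * the end of the ring this takes two bus_dmamap_sync() calls.
 */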
static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' points one descriptor beyond the last we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end) - TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT) - TX_DESC_OFFSET(start),
	    ops);
	if (TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0) > 0) {
		/* sync from start of ring to 'end' */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(0),
		    TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0),
		    ops);
	}
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	mutex_enter(&ring->t_mtx);
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
	mutex_exit(&ring->t_mtx);
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

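/*
 * Mirror the media status negotiated by the PHY into the MAC
 * configuration: interface select (MII vs. GMII), 100 Mbit fast
 * ethernet select, duplex and flow control settings.
 */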
static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf, flow;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100 | AWIN_GMAC_MAC_CONF_MIISEL
	    | AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
			AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}
	if (sc->sc_set_speed)
		sc->sc_set_speed(sc, IFM_SUBTYPE(mii->mii_media_active));

	flow = 0;
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
	}
	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
	}
	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_FLOWCTRL, flow);

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	int ret = dwc_gmac_init_locked(ifp);
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_init_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	uint32_t ffilt;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop_locked(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
	    __SHIFTIN(2, GMAC_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	} else {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
	}
	if (ifp->if_flags & IFF_BROADCAST) {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
	} else {
		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up multicast filter
	 */
	dwc_gmac_setmulti(sc);

	/*
	 * Set up DMA pointers for the RX and TX rings
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
	 */
	uint32_t opmode = GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART;
	if ((sc->sc_flags & DWC_GMAC_FORCE_THRESH_DMA_MODE) == 0) {
		opmode |= GMAC_DMA_OP_RXSTOREFORWARD | GMAC_DMA_OP_TXSTOREFORWARD;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE, opmode);

	sc->sc_stopping = false;

	ifp->if_flags |= IFF_RUNNING;
	sc->sc_txbusy = false;

	return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
#ifdef DWCGMAC_MPSAFE
	KASSERT(if_is_mpsafe(ifp));
#endif

	mutex_enter(sc->sc_lock);
	if (!sc->sc_stopping) {
		mutex_enter(&sc->sc_txq.t_mtx);
		dwc_gmac_start_locked(ifp);
		mutex_exit(&sc->sc_txq.t_mtx);
	}
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_start_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	int start = sc->sc_txq.t_cur;
	struct mbuf *m0;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;
	if (sc->sc_txbusy)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			sc->sc_txbusy = true;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0, BPF_D_OUT);
		if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
			sc->sc_txbusy = true;
			break;
		}
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	dwc_gmac_stop_locked(ifp, disable);
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_stop_locked(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	sc->sc_stopping = true;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		AWIN_GMAC_DMA_OPMODE)
		& ~(GMAC_DMA_OP_TXSTART | GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);

	ifp->if_flags &= ~IFF_RUNNING;
	sc->sc_txbusy = false;
}

/*
 * Add mbuf chain m0 to the TX ring.  Ownership of the first descriptor
 * is handed to the device only after the rest of the chain has been
 * set up, so the hardware never sees a partially built packet.
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
		    "len %lu\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len);
#endif

		sc->sc_descm->tx_init_flags(desc);
		sc->sc_descm->tx_set_len(desc, map->dm_segs[i].ds_len);

		if (i == 0)
			sc->sc_descm->tx_set_first_frag(desc);

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		if (i != 0)
			sc->sc_descm->tx_set_owned_by_dev(desc);

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	sc->sc_descm->tx_set_last_frag(desc);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Pass the first descriptor to the device */
	sc->sc_descm->tx_set_owned_by_dev(&sc->sc_txq.t_desc[first]);

	return 0;
}

/*
 * If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode.  Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
dwc_gmac_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int ret = 0;

	mutex_enter(sc->sc_lock);
	u_short change = ifp->if_flags ^ sc->sc_if_flags;
	sc->sc_if_flags = ifp->if_flags;

	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
		ret = ENETRESET;
		goto out;
	}
	if ((change & IFF_PROMISC) != 0) {
		dwc_gmac_setmulti(sc);
	}
out:
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int error = 0;

	int s = splnet();
	error = ether_ioctl(ifp, cmd, data);

#ifdef DWCGMAC_MPSAFE
	splx(s);
#endif

	if (error == ENETRESET) {
		error = 0;
		if ((cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) &&
		    (ifp->if_flags & IFF_RUNNING)) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			mutex_enter(sc->sc_lock);
			dwc_gmac_setmulti(sc);
			mutex_exit(sc->sc_lock);
		}
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		dwc_gmac_start(ifp);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

#ifndef DWCGMAC_MPSAFE
	splx(s);
#endif

	return error;
}

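/*
 * Reclaim TX descriptors the device is done with: sync and unload the
 * DMA maps, free the transmitted mbufs and clear the tx-busy state so
 * dwc_gmac_start() can queue further packets.
 */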
static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	int i, nsegs;

	mutex_enter(&sc->sc_txq.t_mtx);

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		/*
		 * i + 1 need not be a valid descriptor; this is just a
		 * convention for syncing a single TX descriptor (i).
		 */
		dwc_gmac_txdesc_sync(sc, i, i + 1,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		desc = &sc->sc_txq.t_desc[i];
		if (sc->sc_descm->tx_is_owned_by_dev(desc))
			break;

		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;

		if_statinc(ifp, if_opackets);
		nsegs = data->td_active->dm_nsegs;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;

		sc->sc_txq.t_queued -= nsegs;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		sc->sc_txbusy = false;
	}
	mutex_exit(&sc->sc_txq.t_mtx);
}

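/*
 * Harvest received frames: a fresh cluster mbuf is allocated before the
 * filled buffer is passed up the stack; on any failure the old buffer
 * is recycled and the frame is counted as an input error.
 */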
static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	struct mbuf *m, *mnew;
	int i, len, error;

	mutex_enter(&sc->sc_rxq.r_mtx);
	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		if (sc->sc_descm->rx_is_owned_by_dev(desc))
			break;

		if (sc->sc_descm->rx_has_error(desc)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "RX error: descriptor status %08x, skipping\n",
			    le32toh(desc->ddesc_status0));
#endif
			if_statinc(ifp, if_ierrors);
			goto skip;
		}

		len = sc->sc_descm->rx_get_len(desc);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "rx int: device is done with descriptor #%d, len: %d\n",
		    i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one up;
		 * if that fails, drop the packet and reuse the
		 * existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		mnew->m_len = mnew->m_pkthdr.len = mnew->m_ext.ext_size;
		if (mnew->m_len > AWGE_MAX_PACKET) {
			mnew->m_len = mnew->m_pkthdr.len = AWGE_MAX_PACKET;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
		    mnew, BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
			    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m_set_rcvif(m, ifp);
		m->m_flags |= M_HASFCS;

		if_percpuq_enqueue(sc->sc_ipq, m);

skip:
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);

		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
		sc->sc_descm->rx_set_owned_by_dev(desc);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;

	mutex_exit(&sc->sc_rxq.r_mtx);
}

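/*
 * Program the 64-bit multicast hash filter.  Address ranges (and
 * promiscuous mode) cannot be expressed as hash entries, so those
 * cases take the catch-all "special filter" path below.
 */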
static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &sc->sc_ec;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t ffilt, h;
	int mcnt;

	KASSERT(mutex_owned(sc->sc_lock));

	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
		goto special_filter;
	}

	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM | AWIN_GMAC_MAC_FFILT_PR);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

	ETHER_LOCK(ec);
	ec->ec_flags &= ~ETHER_F_ALLMULTI;
	ETHER_FIRST_MULTI(step, ec, enm);
	mcnt = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
			ec->ec_flags |= ETHER_F_ALLMULTI;
			ETHER_UNLOCK(ec);
			goto special_filter;
		}

		h = ~ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
		hashes[h >> 5] |= (1 << (h & 0x1f));

		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	if (mcnt)
		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
	else
		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    hashes[0]);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    hashes[1]);
	sc->sc_if_flags = ifp->if_flags;

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	return;

special_filter:
#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	/* no MAC hashes, ALLMULTI or PROMISC */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
	    ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    0xffffffff);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    0xffffffff);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
}

int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	if (sc->sc_stopping)
		return 0;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		if_statinc(&sc->sc_ec.ec_if, if_oerrors);
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	rnd_add_uint32(&sc->rnd_source, dma_status);

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	/*
	 * Get more packets
	 */
	if (rv)
		if_schedule_deferred_start(&sc->sc_ec.ec_if);

	return rv;
}

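/*
 * Descriptor accessors.  The core knows two descriptor layouts
 * (standard and enhanced); dwc_gmac_attach() selects the matching
 * method table.  Descriptors are little-endian in memory, hence the
 * le32toh()/htole32() conversions throughout.
 */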
static void
dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 |= htole32(DDESC_STATUS_OWNEDBYDEV);
}

static int
dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) & DDESC_STATUS_OWNEDBYDEV);
}

static void
dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32((cntl & ~DDESC_CNTL_SIZE1MASK) |
		__SHIFTIN(len, DDESC_CNTL_SIZE1MASK));
}

static uint32_t
dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *desc)
{

	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_STATUS_FRMLENMSK);
}

static void
dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
}

static void
dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32(cntl | DDESC_CNTL_TXFIRST);
}

static void
dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32(cntl |
		DDESC_CNTL_TXLAST | DDESC_CNTL_TXINT);
}

static void
dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
}

static int
dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) &
		(DDESC_STATUS_RXERROR | DDESC_STATUS_RXTRUNCATED));
}

static void
dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
	uint32_t tdes1 = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32((tdes1 & ~DDESC_DES1_SIZE1MASK) |
		__SHIFTIN(len, DDESC_DES1_SIZE1MASK));
}

static uint32_t
dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *desc)
{

	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_RDES0_FL);
}

static void
dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = htole32(DDESC_TDES0_TCH);
	desc->ddesc_cntl1 = 0;
}

static void
dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_FS);
}

static void
dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_LS | DDESC_TDES0_IC);
}

static void
dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_RDES1_RCH);
}

static int
dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) &
		(DDESC_RDES0_ES | DDESC_RDES0_LE));
}

1603
1604#ifdef DWC_GMAC_DEBUG
1605static void
1606dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
1607{
1608	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
1609	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
1610	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
1611	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
1612	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
1613	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
1614	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
1615	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
1616	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
1617	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
1618	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
1619	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
1620	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
1621	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
1622	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
1623	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
1624	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
1625	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
1626	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
1627	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
1628	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
1629	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
1630	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
1631	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
1632}
1633
1634static void
1635dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
1636{
1637	int i;
1638
1639	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
1640	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
1641	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
1642	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
1643		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
1644		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
1645		    "data: %08x next: %08x\n",
1646		    i, sc->sc_txq.t_physaddr +
1647			i * sizeof(struct dwc_gmac_dev_dmadesc),
1648		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
1649		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
1650	}
1651}
1652
1653static void
1654dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
1655{
1656	int i;
1657
1658	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
1659	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
1660	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
1661	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
1662		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
1663		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
1664		    "data: %08x next: %08x\n",
1665		    i, sc->sc_rxq.r_physaddr +
1666			i * sizeof(struct dwc_gmac_dev_dmadesc),
1667		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
1668		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
1669	}
1670}
1671
1672static void
1673dwc_dump_status(struct dwc_gmac_softc *sc)
1674{
1675	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1676	    AWIN_GMAC_MAC_INTR);
1677	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1678	    AWIN_GMAC_DMA_STATUS);
1679	char buf[200];
1680
1681	/* print interrupt state */
1682	snprintb(buf, sizeof(buf), "\177\20"
1683	    "b\x10""NI\0"
1684	    "b\x0f""AI\0"
1685	    "b\x0e""ER\0"
1686	    "b\x0d""FB\0"
1687	    "b\x0a""ET\0"
1688	    "b\x09""RW\0"
1689	    "b\x08""RS\0"
1690	    "b\x07""RU\0"
1691	    "b\x06""RI\0"
1692	    "b\x05""UN\0"
1693	    "b\x04""OV\0"
1694	    "b\x03""TJ\0"
1695	    "b\x02""TU\0"
1696	    "b\x01""TS\0"
1697	    "b\x00""TI\0"
1698	    "\0", dma_status);
1699	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
1700	    status, buf);
1701}
1702
1703static void
1704dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
1705{
1706	dwc_dump_status(sc);
1707	dwc_gmac_dump_ffilt(sc,
1708	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
1709	dwc_gmac_dump_dma(sc);
1710	dwc_gmac_dump_tx_desc(sc);
1711	dwc_gmac_dump_rx_desc(sc);
1712
1713	panic("%s", msg);
1714}
1715
1716static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
1717{
1718	char buf[200];
1719
1720	/* print filter setup */
1721	snprintb(buf, sizeof(buf), "\177\20"
1722	    "b\x1f""RA\0"
1723	    "b\x0a""HPF\0"
1724	    "b\x09""SAF\0"
1725	    "b\x08""SAIF\0"
1726	    "b\x05""DBF\0"
1727	    "b\x04""PM\0"
1728	    "b\x03""DAIF\0"
1729	    "b\x02""HMC\0"
1730	    "b\x01""HUC\0"
1731	    "b\x00""PR\0"
1732	    "\0", ffilt);
1733	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
1734}
1735#endif
1736