/* if_arge.c revision 232627 */
1/*-
2 * Copyright (c) 2009, Oleksandr Tymoshenko
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice unmodified, this list of conditions, and the following
10 *    disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28#include <sys/cdefs.h>
29__FBSDID("$FreeBSD: head/sys/mips/atheros/if_arge.c 232627 2012-03-06 22:16:10Z ray $");
30
31/*
32 * AR71XX gigabit ethernet driver
33 */
34#ifdef HAVE_KERNEL_OPTION_HEADERS
35#include "opt_device_polling.h"
36#endif
37
38#include <sys/param.h>
39#include <sys/endian.h>
40#include <sys/systm.h>
41#include <sys/sockio.h>
42#include <sys/mbuf.h>
43#include <sys/malloc.h>
44#include <sys/kernel.h>
45#include <sys/module.h>
46#include <sys/socket.h>
47#include <sys/taskqueue.h>
48#include <sys/sysctl.h>
49
50#include <net/if.h>
51#include <net/if_arp.h>
52#include <net/ethernet.h>
53#include <net/if_dl.h>
54#include <net/if_media.h>
55#include <net/if_types.h>
56
57#include <net/bpf.h>
58
59#include <machine/bus.h>
60#include <machine/cache.h>
61#include <machine/resource.h>
62#include <vm/vm_param.h>
63#include <vm/vm.h>
64#include <vm/pmap.h>
65#include <machine/pmap.h>
66#include <sys/bus.h>
67#include <sys/rman.h>
68
69#include <dev/mii/mii.h>
70#include <dev/mii/miivar.h>
71
72#include <dev/pci/pcireg.h>
73#include <dev/pci/pcivar.h>
74
75MODULE_DEPEND(arge, ether, 1, 1, 1);
76MODULE_DEPEND(arge, miibus, 1, 1, 1);
77
78#include "miibus_if.h"
79
80#include <mips/atheros/ar71xxreg.h>
81#include <mips/atheros/if_argevar.h>
82#include <mips/atheros/ar71xx_setup.h>
83#include <mips/atheros/ar71xx_cpudef.h>
84
/*
 * Debug flag bits for the per-softc sc->arge_debug mask, consulted by
 * the ARGEDEBUG() macro (only when built with ARGE_DEBUG).
 */
typedef enum {
	ARGE_DBG_MII 	=	0x00000001,	/* MII/PHY register access */
	ARGE_DBG_INTR	=	0x00000002,	/* interrupt handling */
	ARGE_DBG_TX	=	0x00000004,	/* transmit path */
	ARGE_DBG_RX	=	0x00000008,	/* receive path */
	ARGE_DBG_ERR	=	0x00000010,	/* error conditions */
	ARGE_DBG_RESET	=	0x00000020,	/* reset handling */
} arge_debug_flags;
93
#ifdef ARGE_DEBUG
/*
 * Conditional debug printf: emit via device_printf() only when one of
 * the bits in _m is enabled in the softc's arge_debug mask.  Compiles
 * to nothing when ARGE_DEBUG is not defined.
 */
#define	ARGEDEBUG(_sc, _m, ...) 					\
	do {								\
		if ((_m) & (_sc)->arge_debug)				\
			device_printf((_sc)->arge_dev, __VA_ARGS__);	\
	} while (0)
#else
#define	ARGEDEBUG(_sc, _m, ...)
#endif
103
/* Forward declarations: newbus/ifnet entry points and helpers. */
static int arge_attach(device_t);
static int arge_detach(device_t);
static void arge_flush_ddr(struct arge_softc *);
static int arge_ifmedia_upd(struct ifnet *);
static void arge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int arge_ioctl(struct ifnet *, u_long, caddr_t);
static void arge_init(void *);
static void arge_init_locked(struct arge_softc *);
static void arge_link_task(void *, int);
static void arge_set_pll(struct arge_softc *, int, int);
static int arge_miibus_readreg(device_t, int, int);
static void arge_miibus_statchg(device_t);
static int arge_miibus_writereg(device_t, int, int, int);
static int arge_probe(device_t);
static void arge_reset_dma(struct arge_softc *);
static int arge_resume(device_t);
static int arge_rx_ring_init(struct arge_softc *);
static int arge_tx_ring_init(struct arge_softc *);
#ifdef DEVICE_POLLING
static int arge_poll(struct ifnet *, enum poll_cmd, int);
#endif
static int arge_shutdown(device_t);
static void arge_start(struct ifnet *);
static void arge_start_locked(struct ifnet *);
static void arge_stop(struct arge_softc *);
static int arge_suspend(device_t);

/* Interrupt / polling fast-path helpers. */
static int arge_rx_locked(struct arge_softc *);
static void arge_tx_locked(struct arge_softc *);
static void arge_intr(void *);
static int arge_intr_filter(void *);
static void arge_tick(void *);

/*
 * ifmedia callbacks for multiPHY MAC
 */
void arge_multiphy_mediastatus(struct ifnet *, struct ifmediareq *);
int arge_multiphy_mediachange(struct ifnet *);

/* busdma ring and mbuf management. */
static void arge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int arge_dma_alloc(struct arge_softc *);
static void arge_dma_free(struct arge_softc *);
static int arge_newbuf(struct arge_softc *, int);
static __inline void arge_fixup_rx(struct mbuf *);
148
/*
 * newbus method table: device lifecycle hooks plus the MII accessors
 * used by the miibus child attached in arge_attach().
 */
static device_method_t arge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		arge_probe),
	DEVMETHOD(device_attach,	arge_attach),
	DEVMETHOD(device_detach,	arge_detach),
	DEVMETHOD(device_suspend,	arge_suspend),
	DEVMETHOD(device_resume,	arge_resume),
	DEVMETHOD(device_shutdown,	arge_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	arge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	arge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	arge_miibus_statchg),

	DEVMETHOD_END
};
165
/* Driver glue: attaches to nexus, with a miibus child per instance. */
static driver_t arge_driver = {
	"arge",
	arge_methods,
	sizeof(struct arge_softc)
};

static devclass_t arge_devclass;

DRIVER_MODULE(arge, nexus, arge_driver, arge_devclass, 0, 0);
DRIVER_MODULE(miibus, arge, miibus_driver, miibus_devclass, 0, 0);

/*
 * RedBoot passes MAC address to entry point as environment
 * variable. platform_start parses it and stores in this variable
 */
extern uint32_t ar711_base_mac[ETHER_ADDR_LEN];

/* Serializes MII register access across both MAC units. */
static struct mtx miibus_mtx;

MTX_SYSINIT(miibus_mtx, &miibus_mtx, "arge mii lock", MTX_DEF);
186
/*
 * Flush the DDR FIFO for this MAC unit; the chip-specific work is done
 * by ar71xx_device_flush_ddr_ge().
 */
static void
arge_flush_ddr(struct arge_softc *sc)
{

	ar71xx_device_flush_ddr_ge(sc->arge_mac_unit);
}
196
/*
 * Probe: always matches, since the parent (nexus) only instantiates
 * this device on AR71xx-family SoCs.  Returns 0 (highest priority).
 */
static int
arge_probe(device_t dev)
{

	device_set_desc(dev, "Atheros AR71xx built-in ethernet interface");
	return (0);
}
204
/*
 * Register per-device sysctl nodes under this device's sysctl tree:
 * TX alignment statistics always, plus the debug mask and TX ring
 * producer/consumer/count counters when built with ARGE_DEBUG.
 */
static void
arge_attach_sysctl(device_t dev)
{
	struct arge_softc *sc = device_get_softc(dev);
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);

#ifdef	ARGE_DEBUG
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"debug", CTLFLAG_RW, &sc->arge_debug, 0,
		"arge interface debugging flags");
#endif

	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"tx_pkts_aligned", CTLFLAG_RW, &sc->stats.tx_pkts_aligned, 0,
		"number of TX aligned packets");

	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"tx_pkts_unaligned", CTLFLAG_RW, &sc->stats.tx_pkts_unaligned, 0,
		"number of TX unaligned packets");

#ifdef	ARGE_DEBUG
	/* Live TX ring state, useful when chasing stalls. */
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_prod",
	    CTLFLAG_RW, &sc->arge_cdata.arge_tx_prod, 0, "");
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_cons",
	    CTLFLAG_RW, &sc->arge_cdata.arge_tx_cons, 0, "");
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_cnt",
	    CTLFLAG_RW, &sc->arge_cdata.arge_tx_cnt, 0, "");
#endif
}
235
/*
 * Attach: map registers, allocate the IRQ, determine the MAC address
 * (EEPROM hint / RedBoot-supplied / random), reset and configure the
 * MAC block, attach PHYs (single-PHY via miibus, multi-PHY via a
 * fixed ifmedia), hook the interrupt and register the ifnet.
 *
 * Returns 0 on success or an errno; on any failure arge_detach() is
 * invoked to release whatever was already allocated.
 */
static int
arge_attach(device_t dev)
{
	uint8_t			eaddr[ETHER_ADDR_LEN];
	struct ifnet		*ifp;
	struct arge_softc	*sc;
	int			error = 0, rid, phymask;
	uint32_t		reg, rnd;
	int			is_base_mac_empty, i, phys_total;
	uint32_t		hint;
	long			eeprom_mac_addr = 0;

	sc = device_get_softc(dev);
	sc->arge_dev = dev;
	sc->arge_mac_unit = device_get_unit(dev);

	/*
	 * Some units (eg the TP-Link WR-1043ND) do not have a convenient
	 * EEPROM location to read the ethernet MAC address from.
	 * OpenWRT simply snaffles it from a fixed location.
	 *
	 * Since multiple units seem to use this feature, include
	 * a method of setting the MAC address based on a flash location
	 * in CPU address space.
	 */
	if (sc->arge_mac_unit == 0 &&
	    resource_long_value(device_get_name(dev), device_get_unit(dev),
	    "eeprommac", &eeprom_mac_addr) == 0) {
		int i;
		/* Flash is read through the uncached KSEG1 window. */
		const char *mac = (const char *) MIPS_PHYS_TO_KSEG1(eeprom_mac_addr);
		device_printf(dev, "Overriding MAC from EEPROM\n");
		for (i = 0; i < 6; i++) {
			ar711_base_mac[i] = mac[i];
		}
	}

	KASSERT(((sc->arge_mac_unit == 0) || (sc->arge_mac_unit == 1)),
	    ("if_arge: Only MAC0 and MAC1 supported"));

	/*
	 *  Get which PHY of 5 available we should use for this unit
	 */
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "phymask", &phymask) != 0) {
		/*
		 * Use port 4 (WAN) for GE0. For any other port use
		 * its PHY the same as its unit number
		 */
		if (sc->arge_mac_unit == 0)
			phymask = (1 << 4);
		else
			/* Use all phys up to 4 */
			phymask = (1 << 4) - 1;

		device_printf(dev, "No PHY specified, using mask %d\n", phymask);
	}

	/*
	 *  Get default media & duplex mode, by default its Base100T
	 *  and full duplex
	 */
	/*
	 * NOTE(review): hint is uint32_t but resource_int_value() fills an
	 * int; benign on this ILP32 MIPS target but type-unclean — confirm.
	 */
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "media", &hint) != 0)
		hint = 0;

	if (hint == 1000)
		sc->arge_media_type = IFM_1000_T;
	else
		sc->arge_media_type = IFM_100_TX;

	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fduplex", &hint) != 0)
		hint = 1;

	if (hint)
		sc->arge_duplex_mode = IFM_FDX;
	else
		sc->arge_duplex_mode = 0;

	sc->arge_phymask = phymask;

	mtx_init(&sc->arge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->arge_stat_callout, &sc->arge_mtx, 0);
	TASK_INIT(&sc->arge_link_task, 0, arge_link_task, sc);

	/* Map control/status registers. */
	sc->arge_rid = 0;
	sc->arge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->arge_rid, RF_ACTIVE);

	if (sc->arge_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupts */
	rid = 0;
	sc->arge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->arge_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate ifnet structure. */
	ifp = sc->arge_ifp = if_alloc(IFT_ETHER);

	if (ifp == NULL) {
		device_printf(dev, "couldn't allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = arge_ioctl;
	ifp->if_start = arge_start;
	ifp->if_init = arge_init;
	sc->arge_if_flags = ifp->if_flags;

	/* XXX: add real size */
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * NOTE(review): if_capenable is snapshotted *before* IFCAP_POLLING
	 * is added to if_capabilities, so polling is advertised but not
	 * enabled by default — confirm this ordering is intentional.
	 */
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/* A base MAC of all zero bytes means "not configured". */
	is_base_mac_empty = 1;
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		eaddr[i] = ar711_base_mac[i] & 0xff;
		if (eaddr[i] != 0)
			is_base_mac_empty = 0;
	}

	if (is_base_mac_empty) {
		/*
		 * No MAC address configured. Generate the random one.
		 */
		if  (bootverbose)
			device_printf(dev,
			    "Generating random ethernet address.\n");

		/*
		 * "bsd" prefix: 'b' (0x62) has the locally-administered
		 * bit set and the multicast bit clear.
		 */
		rnd = arc4random();
		eaddr[0] = 'b';
		eaddr[1] = 's';
		eaddr[2] = 'd';
		eaddr[3] = (rnd >> 24) & 0xff;
		eaddr[4] = (rnd >> 16) & 0xff;
		eaddr[5] = (rnd >> 8) & 0xff;
	}

	/* Give MAC1 a distinct address by bumping the last octet. */
	if (sc->arge_mac_unit != 0)
		eaddr[5] +=  sc->arge_mac_unit;

	if (arge_dma_alloc(sc) != 0) {
		error = ENXIO;
		goto fail;
	}

	/* Initialize the MAC block */

	/* Step 1. Soft-reset MAC */
	ARGE_SET_BITS(sc, AR71XX_MAC_CFG1, MAC_CFG1_SOFT_RESET);
	DELAY(20);

	/* Step 2. Punt the MAC core from the central reset register */
	ar71xx_device_stop(sc->arge_mac_unit == 0 ? RST_RESET_GE0_MAC : RST_RESET_GE1_MAC);
	DELAY(100);
	ar71xx_device_start(sc->arge_mac_unit == 0 ? RST_RESET_GE0_MAC : RST_RESET_GE1_MAC);

	/* Step 3. Reconfigure MAC block */
	ARGE_WRITE(sc, AR71XX_MAC_CFG1,
		MAC_CFG1_SYNC_RX | MAC_CFG1_RX_ENABLE |
		MAC_CFG1_SYNC_TX | MAC_CFG1_TX_ENABLE);

	reg = ARGE_READ(sc, AR71XX_MAC_CFG2);
	reg |= MAC_CFG2_ENABLE_PADCRC | MAC_CFG2_LENGTH_FIELD ;
	ARGE_WRITE(sc, AR71XX_MAC_CFG2, reg);

	ARGE_WRITE(sc, AR71XX_MAC_MAX_FRAME_LEN, 1536);

	/* Reset MII bus */
	ARGE_WRITE(sc, AR71XX_MAC_MII_CFG, MAC_MII_CFG_RESET);
	DELAY(100);
	ARGE_WRITE(sc, AR71XX_MAC_MII_CFG, MAC_MII_CFG_CLOCK_DIV_28);
	DELAY(100);

	/*
	 * Program the station address (split across two registers:
	 * ADDR1 holds the low four octets, ADDR2 the high two).
	 */
	ARGE_WRITE(sc, AR71XX_MAC_STA_ADDR1,
	    (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8)  | eaddr[5]);
	ARGE_WRITE(sc, AR71XX_MAC_STA_ADDR2, (eaddr[0] << 8) | eaddr[1]);

	ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG0,
	    FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT);

	/* SoC-specific FIFO tuning values. */
	switch (ar71xx_soc) {
		case AR71XX_SOC_AR7240:
		case AR71XX_SOC_AR7241:
		case AR71XX_SOC_AR7242:
			ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG1, 0x0010ffff);
			ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG2, 0x015500aa);
			break;
		default:
			ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG1, 0x0fff0000);
			ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG2, 0x00001fff);
	}

	ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMATCH,
	    FIFO_RX_FILTMATCH_DEFAULT);

	ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMASK,
	    FIFO_RX_FILTMASK_DEFAULT);

	/*
	 * Check if we have single-PHY MAC or multi-PHY
	 */
	phys_total = 0;
	for (i = 0; i < ARGE_NPHY; i++)
		if (phymask & (1 << i))
			phys_total ++;

	if (phys_total == 0) {
		error = EINVAL;
		goto fail;
	}

	if (phys_total == 1) {
		/* Do MII setup. */
		error = mii_attach(dev, &sc->arge_miibus, ifp,
		    arge_ifmedia_upd, arge_ifmedia_sts, BMSR_DEFCAPMASK,
		    MII_PHY_ANY, MII_OFFSET_ANY, 0);
		if (error != 0) {
			device_printf(dev, "attaching PHYs failed\n");
			goto fail;
		}
	}
	else {
		/* Multi-PHY: fixed media, no miibus. */
		ifmedia_init(&sc->arge_ifmedia, 0,
		    arge_multiphy_mediachange,
		    arge_multiphy_mediastatus);
		ifmedia_add(&sc->arge_ifmedia,
		    IFM_ETHER | sc->arge_media_type  | sc->arge_duplex_mode,
		    0, NULL);
		ifmedia_set(&sc->arge_ifmedia,
		    IFM_ETHER | sc->arge_media_type  | sc->arge_duplex_mode);
		arge_set_pll(sc, sc->arge_media_type, sc->arge_duplex_mode);
	}

	/* Call MI attach routine. */
	ether_ifattach(ifp, eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->arge_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    arge_intr_filter, arge_intr, sc, &sc->arge_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	/* setup sysctl variables */
	arge_attach_sysctl(dev);

fail:
	/* The success path falls through here with error == 0. */
	if (error)
		arge_detach(dev);

	return (error);
}
517
/*
 * Detach: tear down in reverse order of attach.  Also used by
 * arge_attach() as its error-unwind path, so every release below is
 * guarded against the resource never having been allocated.
 * Returns 0 (always succeeds).
 */
static int
arge_detach(device_t dev)
{
	struct arge_softc	*sc = device_get_softc(dev);
	struct ifnet		*ifp = sc->arge_ifp;

	KASSERT(mtx_initialized(&sc->arge_mtx), ("arge mutex not initialized"));

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		ARGE_LOCK(sc);
		/* Flag so arge_ioctl() won't re-init a dying interface. */
		sc->arge_detach = 1;
#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING)
			ether_poll_deregister(ifp);
#endif

		arge_stop(sc);
		ARGE_UNLOCK(sc);
		/* Drain with the lock dropped; the task takes ARGE_LOCK. */
		taskqueue_drain(taskqueue_swi, &sc->arge_link_task);
		ether_ifdetach(ifp);
	}

	if (sc->arge_miibus)
		device_delete_child(dev, sc->arge_miibus);

	bus_generic_detach(dev);

	if (sc->arge_intrhand)
		bus_teardown_intr(dev, sc->arge_irq, sc->arge_intrhand);

	if (sc->arge_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->arge_rid,
		    sc->arge_res);

	if (ifp)
		if_free(ifp);

	arge_dma_free(sc);

	mtx_destroy(&sc->arge_mtx);

	return (0);

}
563
/*
 * Suspend is not implemented for this embedded target: panic loudly
 * rather than silently losing device state.
 */
static int
arge_suspend(device_t dev)
{

	panic("%s", __func__);
	return 0;	/* NOTREACHED */
}
571
/*
 * Resume is not implemented for this embedded target: panic loudly
 * rather than running with an unconfigured MAC.
 */
static int
arge_resume(device_t dev)
{

	panic("%s", __func__);
	return 0;	/* NOTREACHED */
}
579
580static int
581arge_shutdown(device_t dev)
582{
583	struct arge_softc	*sc;
584
585	sc = device_get_softc(dev);
586
587	ARGE_LOCK(sc);
588	arge_stop(sc);
589	ARGE_UNLOCK(sc);
590
591	return (0);
592}
593
/*
 * miibus read method: read PHY register 'reg' on PHY 'phy'.
 * Returns the 16-bit register value, 0 for PHYs outside our phymask,
 * or -1 on timeout.  Serialized against the other MAC unit via the
 * global miibus_mtx.
 */
static int
arge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct arge_softc * sc = device_get_softc(dev);
	int i, result;
	uint32_t addr = (phy << MAC_MII_PHY_ADDR_SHIFT)
	    | (reg & MAC_MII_REG_MASK);

	/* Ignore PHYs this unit does not own. */
	if ((sc->arge_phymask  & (1 << phy)) == 0)
		return (0);

	mtx_lock(&miibus_mtx);
	/* Idle the engine, latch the address, then start the read. */
	ARGE_MII_WRITE(AR71XX_MAC_MII_CMD, MAC_MII_CMD_WRITE);
	ARGE_MII_WRITE(AR71XX_MAC_MII_ADDR, addr);
	ARGE_MII_WRITE(AR71XX_MAC_MII_CMD, MAC_MII_CMD_READ);

	/* Poll BUSY; i ends at -1 iff the timeout expired. */
	i = ARGE_MII_TIMEOUT;
	while ((ARGE_MII_READ(AR71XX_MAC_MII_INDICATOR) &
	    MAC_MII_INDICATOR_BUSY) && (i--))
		DELAY(5);

	if (i < 0) {
		mtx_unlock(&miibus_mtx);
		ARGEDEBUG(sc, ARGE_DBG_MII, "%s timedout\n", __func__);
		/* XXX: return ERRNO instead? */
		return (-1);
	}

	result = ARGE_MII_READ(AR71XX_MAC_MII_STATUS) & MAC_MII_STATUS_MASK;
	/* Return the engine to the idle (write) state. */
	ARGE_MII_WRITE(AR71XX_MAC_MII_CMD, MAC_MII_CMD_WRITE);
	mtx_unlock(&miibus_mtx);

	ARGEDEBUG(sc, ARGE_DBG_MII, "%s: phy=%d, reg=%02x, value[%08x]=%04x\n", __func__,
		 phy, reg, addr, result);

	return (result);
}
631
/*
 * miibus write method: write 'data' to PHY register 'reg' on PHY
 * 'phy'.  Returns 0 on success, -1 on timeout or for PHYs outside our
 * phymask.  Serialized via the global miibus_mtx.
 */
static int
arge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct arge_softc * sc = device_get_softc(dev);
	int i;
	uint32_t addr =
	    (phy << MAC_MII_PHY_ADDR_SHIFT) | (reg & MAC_MII_REG_MASK);


	/* Ignore PHYs this unit does not own. */
	if ((sc->arge_phymask  & (1 << phy)) == 0)
		return (-1);

	ARGEDEBUG(sc, ARGE_DBG_MII, "%s: phy=%d, reg=%02x, value=%04x\n", __func__,
	    phy, reg, data);

	mtx_lock(&miibus_mtx);
	/* Latch the address; writing CONTROL starts the transaction. */
	ARGE_MII_WRITE(AR71XX_MAC_MII_ADDR, addr);
	ARGE_MII_WRITE(AR71XX_MAC_MII_CONTROL, data);

	/* Poll BUSY; i ends at -1 iff the timeout expired. */
	i = ARGE_MII_TIMEOUT;
	while ((ARGE_MII_READ(AR71XX_MAC_MII_INDICATOR) &
	    MAC_MII_INDICATOR_BUSY) && (i--))
		DELAY(5);

	mtx_unlock(&miibus_mtx);

	if (i < 0) {
		ARGEDEBUG(sc, ARGE_DBG_MII, "%s timedout\n", __func__);
		/* XXX: return ERRNO instead? */
		return (-1);
	}

	return (0);
}
666
667static void
668arge_miibus_statchg(device_t dev)
669{
670	struct arge_softc	*sc;
671
672	sc = device_get_softc(dev);
673	taskqueue_enqueue(taskqueue_swi, &sc->arge_link_task);
674}
675
/*
 * Deferred link-state handler (queued by arge_miibus_statchg).
 * Under the softc lock, mirror the MII link state into
 * sc->arge_link_status and reprogram the PLL for the negotiated
 * speed/duplex when the link is up.
 */
static void
arge_link_task(void *arg, int pending)
{
	struct arge_softc	*sc;
	struct mii_data		*mii;
	struct ifnet		*ifp;
	uint32_t		media, duplex;

	sc = (struct arge_softc *)arg;

	ARGE_LOCK(sc);
	mii = device_get_softc(sc->arge_miibus);
	ifp = sc->arge_ifp;
	/* Nothing to do if the interface isn't up and running. */
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		ARGE_UNLOCK(sc);
		return;
	}

	if (mii->mii_media_status & IFM_ACTIVE) {

		media = IFM_SUBTYPE(mii->mii_media_active);

		if (media != IFM_NONE) {
			/* Link came up: retune the MAC/PLL. */
			sc->arge_link_status = 1;
			duplex = mii->mii_media_active & IFM_GMASK;
			arge_set_pll(sc, media, duplex);
		}
	} else
		sc->arge_link_status = 0;

	ARGE_UNLOCK(sc);
}
709
/*
 * Program the MAC interface mode, duplex, RX byte-mode filter and TX
 * FIFO threshold for the given media subtype (IFM_10_T/100_TX/1000_*)
 * and duplex (IFM_FDX or 0), then have the SoC layer set the matching
 * ethernet PLL dividers.
 */
static void
arge_set_pll(struct arge_softc *sc, int media, int duplex)
{
	uint32_t		cfg, ifcontrol, rx_filtmask;
	uint32_t		fifo_tx;
	int if_speed;

	/* Read-modify-write CFG2: clear mode/duplex bits first. */
	cfg = ARGE_READ(sc, AR71XX_MAC_CFG2);
	cfg &= ~(MAC_CFG2_IFACE_MODE_1000
	    | MAC_CFG2_IFACE_MODE_10_100
	    | MAC_CFG2_FULL_DUPLEX);

	if (duplex == IFM_FDX)
		cfg |= MAC_CFG2_FULL_DUPLEX;

	ifcontrol = ARGE_READ(sc, AR71XX_MAC_IFCONTROL);
	ifcontrol &= ~MAC_IFCONTROL_SPEED;
	rx_filtmask =
	    ARGE_READ(sc, AR71XX_MAC_FIFO_RX_FILTMASK);
	rx_filtmask &= ~FIFO_RX_MASK_BYTE_MODE;

	switch(media) {
	case IFM_10_T:
		cfg |= MAC_CFG2_IFACE_MODE_10_100;
		if_speed = 10;
		break;
	case IFM_100_TX:
		cfg |= MAC_CFG2_IFACE_MODE_10_100;
		ifcontrol |= MAC_IFCONTROL_SPEED;
		if_speed = 100;
		break;
	case IFM_1000_T:
	case IFM_1000_SX:
		cfg |= MAC_CFG2_IFACE_MODE_1000;
		rx_filtmask |= FIFO_RX_MASK_BYTE_MODE;
		if_speed = 1000;
		break;
	default:
		/* Unknown media: fall back to 100 Mbit settings. */
		if_speed = 100;
		device_printf(sc->arge_dev,
		    "Unknown media %d\n", media);
	}

	/* SoC-specific TX FIFO threshold magic values. */
	switch (ar71xx_soc) {
		case AR71XX_SOC_AR7240:
		case AR71XX_SOC_AR7241:
		case AR71XX_SOC_AR7242:
			fifo_tx = 0x01f00140;
			break;
		case AR71XX_SOC_AR9130:
		case AR71XX_SOC_AR9132:
			fifo_tx = 0x00780fff;
			break;
		default:
			fifo_tx = 0x008001ff;
	}

	ARGE_WRITE(sc, AR71XX_MAC_CFG2, cfg);
	ARGE_WRITE(sc, AR71XX_MAC_IFCONTROL, ifcontrol);
	ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMASK,
	    rx_filtmask);
	ARGE_WRITE(sc, AR71XX_MAC_FIFO_TX_THRESHOLD, fifo_tx);

	/* set PLL registers */
	ar71xx_device_set_pll_ge(sc->arge_mac_unit, if_speed);
}
776
777
/*
 * Quiesce the DMA engine: disable RX/TX, clear the descriptor base
 * registers and acknowledge every pending status bit so no stale
 * interrupt fires after a (re)start.
 */
static void
arge_reset_dma(struct arge_softc *sc)
{
	ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, 0);
	ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, 0);

	ARGE_WRITE(sc, AR71XX_DMA_RX_DESC, 0);
	ARGE_WRITE(sc, AR71XX_DMA_TX_DESC, 0);

	/* Clear all possible RX interrupts */
	while(ARGE_READ(sc, AR71XX_DMA_RX_STATUS) & DMA_RX_STATUS_PKT_RECVD)
		ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_PKT_RECVD);

	/*
	 * Clear all possible TX interrupts
	 */
	while(ARGE_READ(sc, AR71XX_DMA_TX_STATUS) & DMA_TX_STATUS_PKT_SENT)
		ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_PKT_SENT);

	/*
	 * Now Rx/Tx errors
	 */
	ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS,
	    DMA_RX_STATUS_BUS_ERROR | DMA_RX_STATUS_OVERFLOW);
	ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS,
	    DMA_TX_STATUS_BUS_ERROR | DMA_TX_STATUS_UNDERRUN);
}
805
806
807
/*
 * ifnet if_init entry point: acquire the softc lock and run the
 * locked initialization path.
 */
static void
arge_init(void *arg)
{
	struct arge_softc *sc = arg;

	ARGE_LOCK(sc);
	arge_init_locked(sc);
	ARGE_UNLOCK(sc);
}
817
/*
 * (Re)initialize the interface with the softc lock held: stop any
 * current activity, rebuild the RX/TX rings, reset the DMA engine,
 * kick off media negotiation (single-PHY only), then point the DMA
 * engine at the rings and enable RX and interrupts.
 */
static void
arge_init_locked(struct arge_softc *sc)
{
	struct ifnet		*ifp = sc->arge_ifp;
	struct mii_data		*mii;

	ARGE_LOCK_ASSERT(sc);

	arge_stop(sc);

	/* Init circular RX list. */
	if (arge_rx_ring_init(sc) != 0) {
		device_printf(sc->arge_dev,
		    "initialization failed: no memory for rx buffers\n");
		arge_stop(sc);
		return;
	}

	/* Init tx descriptors. */
	arge_tx_ring_init(sc);

	arge_reset_dma(sc);


	if (sc->arge_miibus) {
		/* Link goes down until mii negotiation completes. */
		sc->arge_link_status = 0;
		mii = device_get_softc(sc->arge_miibus);
		mii_mediachg(mii);
	}
	else {
		/*
		 * Sun always shines over multiPHY interface
		 */
		sc->arge_link_status = 1;
	}

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* The periodic tick is only needed to poll miibus link state. */
	if (sc->arge_miibus)
		callout_reset(&sc->arge_stat_callout, hz, arge_tick, sc);

	/* Point the DMA engine at the start of both rings. */
	ARGE_WRITE(sc, AR71XX_DMA_TX_DESC, ARGE_TX_RING_ADDR(sc, 0));
	ARGE_WRITE(sc, AR71XX_DMA_RX_DESC, ARGE_RX_RING_ADDR(sc, 0));

	/* Start listening */
	ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, DMA_RX_CONTROL_EN);

	/* Enable interrupts */
	ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL);
}
869
870/*
871 * Return whether the mbuf chain is correctly aligned
872 * for the arge TX engine.
873 *
874 * The TX engine requires each fragment to be aligned to a
875 * 4 byte boundary and the size of each fragment except
876 * the last to be a multiple of 4 bytes.
877 */
878static int
879arge_mbuf_chain_is_tx_aligned(struct mbuf *m0)
880{
881	struct mbuf *m;
882
883	for (m = m0; m != NULL; m = m->m_next) {
884		if((mtod(m, intptr_t) & 3) != 0)
885			return 0;
886		if ((m->m_next != NULL) && ((m->m_len & 0x03) != 0))
887			return 0;
888	}
889	return 1;
890}
891
/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 *
 * On success returns 0 with the chain owned by the ring (freed later
 * by arge_tx_locked).  On failure returns an errno; *m_head is set to
 * NULL only if the chain was consumed/freed here, otherwise the
 * caller still owns it (arge_start_locked re-queues it).
 */
static int
arge_encap(struct arge_softc *sc, struct mbuf **m_head)
{
	struct arge_txdesc	*txd;
	struct arge_desc	*desc, *prev_desc;
	bus_dma_segment_t	txsegs[ARGE_MAXFRAGS];
	int			error, i, nsegs, prod, prev_prod;
	struct mbuf		*m;

	ARGE_LOCK_ASSERT(sc);

	/*
	 * Fix mbuf chain, all fragments should be 4 bytes aligned and
	 * even 4 bytes
	 */
	m = *m_head;
	if (! arge_mbuf_chain_is_tx_aligned(m)) {
		sc->stats.tx_pkts_unaligned++;
		/* Defrag into a single (aligned) cluster. */
		m = m_defrag(*m_head, M_DONTWAIT);
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
	} else
		sc->stats.tx_pkts_aligned++;

	prod = sc->arge_cdata.arge_tx_prod;
	txd = &sc->arge_cdata.arge_txdesc[prod];
	error = bus_dmamap_load_mbuf_sg(sc->arge_cdata.arge_tx_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);

	if (error == EFBIG) {
		/*
		 * NOTE(review): chain has more than ARGE_MAXFRAGS segments
		 * even after defrag; panicking here is heavy-handed —
		 * dropping the packet would be friendlier.
		 */
		panic("EFBIG");
	} else if (error != 0)
		return (error);

	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check number of available descriptors. */
	if (sc->arge_cdata.arge_tx_cnt + nsegs >= (ARGE_TX_RING_COUNT - 1)) {
		bus_dmamap_unload(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}

	txd->tx_m = *m_head;
	bus_dmamap_sync(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Make a list of descriptors for this packet. DMA controller will
	 * walk through it while arge_link is not zero.
	 */
	prev_prod = prod;
	desc = prev_desc = NULL;
	for (i = 0; i < nsegs; i++) {
		desc = &sc->arge_rdata.arge_tx_ring[prod];
		desc->packet_ctrl = ARGE_DMASIZE(txsegs[i].ds_len);

		if (txsegs[i].ds_addr & 3)
			panic("TX packet address unaligned\n");

		desc->packet_addr = txsegs[i].ds_addr;

		/* link with previous descriptor */
		if (prev_desc)
			prev_desc->packet_ctrl |= ARGE_DESC_MORE;

		sc->arge_cdata.arge_tx_cnt++;
		prev_desc = desc;
		ARGE_INC(prod, ARGE_TX_RING_COUNT);
	}

	/* Update producer index. */
	sc->arge_cdata.arge_tx_prod = prod;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
	    sc->arge_cdata.arge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Start transmitting */
	ARGEDEBUG(sc, ARGE_DBG_TX, "%s: setting DMA_TX_CONTROL_EN\n", __func__);
	ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, DMA_TX_CONTROL_EN);
	return (0);
}
986
987static void
988arge_start(struct ifnet *ifp)
989{
990	struct arge_softc	 *sc;
991
992	sc = ifp->if_softc;
993
994	ARGE_LOCK(sc);
995	arge_start_locked(ifp);
996	ARGE_UNLOCK(sc);
997}
998
/*
 * Locked transmit path: dequeue packets from the interface send queue
 * and hand them to arge_encap() until the queue empties or the TX
 * ring fills.  Requires the softc lock; silently returns when the
 * interface is not running or the link is down.
 */
static void
arge_start_locked(struct ifnet *ifp)
{
	struct arge_softc	*sc;
	struct mbuf		*m_head;
	int			enq = 0;

	sc = ifp->if_softc;

	ARGE_LOCK_ASSERT(sc);

	ARGEDEBUG(sc, ARGE_DBG_TX, "%s: beginning\n", __func__);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || sc->arge_link_status == 0 )
		return;

	/*
	 * Before we go any further, check whether we're already full.
	 * The below check errors out immediately if the ring is full
	 * and never gets a chance to set this flag. Although it's
	 * likely never needed, this at least avoids an unexpected
	 * situation.
	 */
	if (sc->arge_cdata.arge_tx_cnt >= ARGE_TX_RING_COUNT - 2) {
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		ARGEDEBUG(sc, ARGE_DBG_ERR, "%s: tx_cnt %d >= max %d; setting IFF_DRV_OACTIVE\n",
		    __func__, sc->arge_cdata.arge_tx_cnt, ARGE_TX_RING_COUNT - 2);
		return;
	}

	arge_flush_ddr(sc);

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc->arge_cdata.arge_tx_cnt < ARGE_TX_RING_COUNT - 2; ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;


		/*
		 * Pack the data into the transmit ring.
		 */
		if (arge_encap(sc, &m_head)) {
			/*
			 * Encap failed: if the chain survived, put it
			 * back at the head of the queue and stall TX.
			 */
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}
	ARGEDEBUG(sc, ARGE_DBG_TX, "%s: finished; queued %d packets\n", __func__, enq);
}
1059
/*
 * Stop the interface with the softc lock held: clear the running
 * flags, cancel the stat callout, mask the DMA interrupts and reset
 * the DMA engine.  Does not free ring buffers (arge_dma_free does).
 */
static void
arge_stop(struct arge_softc *sc)
{
	struct ifnet	    *ifp;

	ARGE_LOCK_ASSERT(sc);

	ifp = sc->arge_ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	/* The callout only runs in single-PHY (miibus) mode. */
	if (sc->arge_miibus)
		callout_stop(&sc->arge_stat_callout);

	/* mask out interrupts */
	ARGE_WRITE(sc, AR71XX_DMA_INTR, 0);

	arge_reset_dma(sc);
}
1077
1078
/*
 * ifnet ioctl handler.  Handles interface flags, (stubbed) multicast
 * changes, media get/set (miibus or local ifmedia) and capability
 * toggling; everything else goes to ether_ioctl().  Returns 0 or an
 * errno.
 */
static int
arge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct arge_softc		*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii;
	int			error;
#ifdef DEVICE_POLLING
	int			mask;
#endif

	switch (command) {
	case SIOCSIFFLAGS:
		ARGE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if (((ifp->if_flags ^ sc->arge_if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
					/* XXX: handle promisc & multi flags */
				}

			} else {
				/* Don't re-init an interface mid-detach. */
				if (!sc->arge_detach)
					arge_init_locked(sc);
			}
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			arge_stop(sc);
		}
		sc->arge_if_flags = ifp->if_flags;
		ARGE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* XXX: implement SIOCDELMULTI */
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* Single-PHY goes via miibus, multi-PHY via local ifmedia. */
		if (sc->arge_miibus) {
			mii = device_get_softc(sc->arge_miibus);
			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		}
		else
			error = ifmedia_ioctl(ifp, ifr, &sc->arge_ifmedia, command);
		break;
	case SIOCSIFCAP:
		/* XXX: Check other capabilities */
		/*
		 * Without DEVICE_POLLING this case intentionally falls
		 * through to the default (ether_ioctl) below.
		 */
#ifdef DEVICE_POLLING
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				/* Mask hw interrupts; poll instead. */
				ARGE_WRITE(sc, AR71XX_DMA_INTR, 0);
				error = ether_poll_register(arge_poll, ifp);
				if (error)
					return error;
				ARGE_LOCK(sc);
				ifp->if_capenable |= IFCAP_POLLING;
				ARGE_UNLOCK(sc);
			} else {
				/* Back to interrupt mode. */
				ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL);
				error = ether_poll_deregister(ifp);
				ARGE_LOCK(sc);
				ifp->if_capenable &= ~IFCAP_POLLING;
				ARGE_UNLOCK(sc);
			}
		}
		error = 0;
		break;
#endif
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
1157
1158/*
1159 * Set media options.
1160 */
1161static int
1162arge_ifmedia_upd(struct ifnet *ifp)
1163{
1164	struct arge_softc		*sc;
1165	struct mii_data		*mii;
1166	struct mii_softc	*miisc;
1167	int			error;
1168
1169	sc = ifp->if_softc;
1170	ARGE_LOCK(sc);
1171	mii = device_get_softc(sc->arge_miibus);
1172	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1173		PHY_RESET(miisc);
1174	error = mii_mediachg(mii);
1175	ARGE_UNLOCK(sc);
1176
1177	return (error);
1178}
1179
1180/*
1181 * Report current media status.
1182 */
1183static void
1184arge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1185{
1186	struct arge_softc		*sc = ifp->if_softc;
1187	struct mii_data		*mii;
1188
1189	mii = device_get_softc(sc->arge_miibus);
1190	ARGE_LOCK(sc);
1191	mii_pollstat(mii);
1192	ifmr->ifm_active = mii->mii_media_active;
1193	ifmr->ifm_status = mii->mii_media_status;
1194	ARGE_UNLOCK(sc);
1195}
1196
/*
 * Context for arge_dmamap_cb(): carries the bus address of the first
 * (and only) DMA segment back to the caller of bus_dmamap_load().
 */
struct arge_dmamap_arg {
	bus_addr_t	arge_busaddr;
};
1200
1201static void
1202arge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1203{
1204	struct arge_dmamap_arg	*ctx;
1205
1206	if (error != 0)
1207		return;
1208	ctx = arg;
1209	ctx->arge_busaddr = segs[0].ds_addr;
1210}
1211
/*
 * Allocate all DMA resources used by the driver: a parent DMA tag,
 * separate tags for the Tx/Rx descriptor rings and for Tx/Rx mbuf
 * buffers, the ring memory itself (loaded so the bus addresses are
 * known up front) plus one DMA map per Tx/Rx buffer and a spare Rx
 * map used by arge_newbuf().
 *
 * Returns 0 on success or a bus_dma error code.  On failure the
 * already-created resources are NOT torn down here; the caller is
 * expected to invoke arge_dma_free().
 */
static int
arge_dma_alloc(struct arge_softc *sc)
{
	struct arge_dmamap_arg	ctx;
	struct arge_txdesc	*txd;
	struct arge_rxdesc	*rxd;
	int			error, i;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->arge_dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_parent_tag);
	if (error != 0) {
		device_printf(sc->arge_dev, "failed to create parent DMA tag\n");
		goto fail;
	}
	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(
	    sc->arge_cdata.arge_parent_tag,	/* parent */
	    ARGE_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ARGE_TX_DMA_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    ARGE_TX_DMA_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->arge_dev, "failed to create Tx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(
	    sc->arge_cdata.arge_parent_tag,	/* parent */
	    ARGE_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ARGE_RX_DMA_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    ARGE_RX_DMA_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->arge_dev, "failed to create Rx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Tx buffers (up to ARGE_MAXFRAGS segments per frame). */
	error = bus_dma_tag_create(
	    sc->arge_cdata.arge_parent_tag,	/* parent */
	    sizeof(uint32_t), 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * ARGE_MAXFRAGS,	/* maxsize */
	    ARGE_MAXFRAGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_tx_tag);
	if (error != 0) {
		device_printf(sc->arge_dev, "failed to create Tx DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx buffers (one mbuf cluster per descriptor). */
	error = bus_dma_tag_create(
	    sc->arge_cdata.arge_parent_tag,	/* parent */
	    ARGE_RX_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    ARGE_MAXFRAGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_rx_tag);
	if (error != 0) {
		device_printf(sc->arge_dev, "failed to create Rx DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->arge_cdata.arge_tx_ring_tag,
	    (void **)&sc->arge_rdata.arge_tx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->arge_cdata.arge_tx_ring_map);
	if (error != 0) {
		device_printf(sc->arge_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	/* arge_busaddr stays 0 if the load callback reports an error. */
	ctx.arge_busaddr = 0;
	error = bus_dmamap_load(sc->arge_cdata.arge_tx_ring_tag,
	    sc->arge_cdata.arge_tx_ring_map, sc->arge_rdata.arge_tx_ring,
	    ARGE_TX_DMA_SIZE, arge_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.arge_busaddr == 0) {
		device_printf(sc->arge_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->arge_rdata.arge_tx_ring_paddr = ctx.arge_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->arge_cdata.arge_rx_ring_tag,
	    (void **)&sc->arge_rdata.arge_rx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->arge_cdata.arge_rx_ring_map);
	if (error != 0) {
		device_printf(sc->arge_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.arge_busaddr = 0;
	error = bus_dmamap_load(sc->arge_cdata.arge_rx_ring_tag,
	    sc->arge_cdata.arge_rx_ring_map, sc->arge_rdata.arge_rx_ring,
	    ARGE_RX_DMA_SIZE, arge_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.arge_busaddr == 0) {
		device_printf(sc->arge_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->arge_rdata.arge_rx_ring_paddr = ctx.arge_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
		txd = &sc->arge_cdata.arge_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->arge_cdata.arge_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->arge_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers (spare map is swapped in arge_newbuf). */
	if ((error = bus_dmamap_create(sc->arge_cdata.arge_rx_tag, 0,
	    &sc->arge_cdata.arge_rx_sparemap)) != 0) {
		device_printf(sc->arge_dev,
		    "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
		rxd = &sc->arge_cdata.arge_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->arge_cdata.arge_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->arge_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	/* Partial state is cleaned up by the caller via arge_dma_free(). */
	return (error);
}
1387
1388static void
1389arge_dma_free(struct arge_softc *sc)
1390{
1391	struct arge_txdesc	*txd;
1392	struct arge_rxdesc	*rxd;
1393	int			i;
1394
1395	/* Tx ring. */
1396	if (sc->arge_cdata.arge_tx_ring_tag) {
1397		if (sc->arge_cdata.arge_tx_ring_map)
1398			bus_dmamap_unload(sc->arge_cdata.arge_tx_ring_tag,
1399			    sc->arge_cdata.arge_tx_ring_map);
1400		if (sc->arge_cdata.arge_tx_ring_map &&
1401		    sc->arge_rdata.arge_tx_ring)
1402			bus_dmamem_free(sc->arge_cdata.arge_tx_ring_tag,
1403			    sc->arge_rdata.arge_tx_ring,
1404			    sc->arge_cdata.arge_tx_ring_map);
1405		sc->arge_rdata.arge_tx_ring = NULL;
1406		sc->arge_cdata.arge_tx_ring_map = NULL;
1407		bus_dma_tag_destroy(sc->arge_cdata.arge_tx_ring_tag);
1408		sc->arge_cdata.arge_tx_ring_tag = NULL;
1409	}
1410	/* Rx ring. */
1411	if (sc->arge_cdata.arge_rx_ring_tag) {
1412		if (sc->arge_cdata.arge_rx_ring_map)
1413			bus_dmamap_unload(sc->arge_cdata.arge_rx_ring_tag,
1414			    sc->arge_cdata.arge_rx_ring_map);
1415		if (sc->arge_cdata.arge_rx_ring_map &&
1416		    sc->arge_rdata.arge_rx_ring)
1417			bus_dmamem_free(sc->arge_cdata.arge_rx_ring_tag,
1418			    sc->arge_rdata.arge_rx_ring,
1419			    sc->arge_cdata.arge_rx_ring_map);
1420		sc->arge_rdata.arge_rx_ring = NULL;
1421		sc->arge_cdata.arge_rx_ring_map = NULL;
1422		bus_dma_tag_destroy(sc->arge_cdata.arge_rx_ring_tag);
1423		sc->arge_cdata.arge_rx_ring_tag = NULL;
1424	}
1425	/* Tx buffers. */
1426	if (sc->arge_cdata.arge_tx_tag) {
1427		for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
1428			txd = &sc->arge_cdata.arge_txdesc[i];
1429			if (txd->tx_dmamap) {
1430				bus_dmamap_destroy(sc->arge_cdata.arge_tx_tag,
1431				    txd->tx_dmamap);
1432				txd->tx_dmamap = NULL;
1433			}
1434		}
1435		bus_dma_tag_destroy(sc->arge_cdata.arge_tx_tag);
1436		sc->arge_cdata.arge_tx_tag = NULL;
1437	}
1438	/* Rx buffers. */
1439	if (sc->arge_cdata.arge_rx_tag) {
1440		for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
1441			rxd = &sc->arge_cdata.arge_rxdesc[i];
1442			if (rxd->rx_dmamap) {
1443				bus_dmamap_destroy(sc->arge_cdata.arge_rx_tag,
1444				    rxd->rx_dmamap);
1445				rxd->rx_dmamap = NULL;
1446			}
1447		}
1448		if (sc->arge_cdata.arge_rx_sparemap) {
1449			bus_dmamap_destroy(sc->arge_cdata.arge_rx_tag,
1450			    sc->arge_cdata.arge_rx_sparemap);
1451			sc->arge_cdata.arge_rx_sparemap = 0;
1452		}
1453		bus_dma_tag_destroy(sc->arge_cdata.arge_rx_tag);
1454		sc->arge_cdata.arge_rx_tag = NULL;
1455	}
1456
1457	if (sc->arge_cdata.arge_parent_tag) {
1458		bus_dma_tag_destroy(sc->arge_cdata.arge_parent_tag);
1459		sc->arge_cdata.arge_parent_tag = NULL;
1460	}
1461}
1462
1463/*
1464 * Initialize the transmit descriptors.
1465 */
1466static int
1467arge_tx_ring_init(struct arge_softc *sc)
1468{
1469	struct arge_ring_data	*rd;
1470	struct arge_txdesc	*txd;
1471	bus_addr_t		addr;
1472	int			i;
1473
1474	sc->arge_cdata.arge_tx_prod = 0;
1475	sc->arge_cdata.arge_tx_cons = 0;
1476	sc->arge_cdata.arge_tx_cnt = 0;
1477
1478	rd = &sc->arge_rdata;
1479	bzero(rd->arge_tx_ring, sizeof(rd->arge_tx_ring));
1480	for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
1481		if (i == ARGE_TX_RING_COUNT - 1)
1482			addr = ARGE_TX_RING_ADDR(sc, 0);
1483		else
1484			addr = ARGE_TX_RING_ADDR(sc, i + 1);
1485		rd->arge_tx_ring[i].packet_ctrl = ARGE_DESC_EMPTY;
1486		rd->arge_tx_ring[i].next_desc = addr;
1487		txd = &sc->arge_cdata.arge_txdesc[i];
1488		txd->tx_m = NULL;
1489	}
1490
1491	bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
1492	    sc->arge_cdata.arge_tx_ring_map,
1493	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1494
1495	return (0);
1496}
1497
1498/*
1499 * Initialize the RX descriptors and allocate mbufs for them. Note that
1500 * we arrange the descriptors in a closed ring, so that the last descriptor
1501 * points back to the first.
1502 */
1503static int
1504arge_rx_ring_init(struct arge_softc *sc)
1505{
1506	struct arge_ring_data	*rd;
1507	struct arge_rxdesc	*rxd;
1508	bus_addr_t		addr;
1509	int			i;
1510
1511	sc->arge_cdata.arge_rx_cons = 0;
1512
1513	rd = &sc->arge_rdata;
1514	bzero(rd->arge_rx_ring, sizeof(rd->arge_rx_ring));
1515	for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
1516		rxd = &sc->arge_cdata.arge_rxdesc[i];
1517		rxd->rx_m = NULL;
1518		rxd->desc = &rd->arge_rx_ring[i];
1519		if (i == ARGE_RX_RING_COUNT - 1)
1520			addr = ARGE_RX_RING_ADDR(sc, 0);
1521		else
1522			addr = ARGE_RX_RING_ADDR(sc, i + 1);
1523		rd->arge_rx_ring[i].next_desc = addr;
1524		if (arge_newbuf(sc, i) != 0) {
1525			return (ENOBUFS);
1526		}
1527	}
1528
1529	bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
1530	    sc->arge_cdata.arge_rx_ring_map,
1531	    BUS_DMASYNC_PREWRITE);
1532
1533	return (0);
1534}
1535
/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 *
 * Loads a fresh cluster into the spare DMA map first; only if that
 * succeeds is the descriptor's old map swapped out, so on failure the
 * previously attached mbuf (if any) remains intact.  Returns 0 on
 * success or ENOBUFS.
 */
static int
arge_newbuf(struct arge_softc *sc, int idx)
{
	struct arge_desc		*desc;
	struct arge_rxdesc	*rxd;
	struct mbuf		*m;
	bus_dma_segment_t	segs[1];
	bus_dmamap_t		map;
	int			nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	/* Reserve leading bytes; keeps the buffer start aligned for DMA. */
	m_adj(m, sizeof(uint64_t));

	if (bus_dmamap_load_mbuf_sg(sc->arge_cdata.arge_rx_tag,
	    sc->arge_cdata.arge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->arge_cdata.arge_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_unload(sc->arge_cdata.arge_rx_tag, rxd->rx_dmamap);
	}
	/* Swap the freshly loaded spare map with the descriptor's map. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->arge_cdata.arge_rx_sparemap;
	sc->arge_cdata.arge_rx_sparemap = map;
	rxd->rx_m = m;
	desc = rxd->desc;
	/* The DMA engine requires 4-byte aligned buffer addresses. */
	if (segs[0].ds_addr & 3)
		panic("RX packet address unaligned");
	desc->packet_addr = segs[0].ds_addr;
	desc->packet_ctrl = ARGE_DESC_EMPTY | ARGE_DMASIZE(segs[0].ds_len);

	bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
	    sc->arge_cdata.arge_rx_ring_map,
	    BUS_DMASYNC_PREWRITE);

	return (0);
}
1582
1583static __inline void
1584arge_fixup_rx(struct mbuf *m)
1585{
1586	int		i;
1587	uint16_t	*src, *dst;
1588
1589	src = mtod(m, uint16_t *);
1590	dst = src - 1;
1591
1592	for (i = 0; i < m->m_len / sizeof(uint16_t); i++) {
1593		*dst++ = *src++;
1594	}
1595
1596	if (m->m_len % sizeof(uint16_t))
1597		*(uint8_t *)dst = *(uint8_t *)src;
1598
1599	m->m_data -= ETHER_ALIGN;
1600}
1601
#ifdef DEVICE_POLLING
/*
 * Polling entry point: reclaim completed TX descriptors and harvest
 * received frames while the interface is running.  Returns the
 * number of packets passed up the stack.
 */
static int
arge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct arge_softc	*sc;
	int			npkts;

	sc = ifp->if_softc;
	npkts = 0;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		ARGE_LOCK(sc);
		arge_tx_locked(sc);
		npkts = arge_rx_locked(sc);
		ARGE_UNLOCK(sc);
	}

	return (npkts);
}
#endif /* DEVICE_POLLING */
1619
1620
/*
 * Reclaim transmitted descriptors: walk the ring from the consumer
 * index to the producer index, freeing the mbuf attached to each
 * descriptor the hardware has marked as finished (ARGE_DESC_EMPTY set
 * again).  Stops at the first still-pending descriptor.  Must be
 * called with the driver lock held.
 */
static void
arge_tx_locked(struct arge_softc *sc)
{
	struct arge_txdesc	*txd;
	struct arge_desc	*cur_tx;
	struct ifnet		*ifp;
	uint32_t		ctrl;
	int			cons, prod;

	ARGE_LOCK_ASSERT(sc);

	cons = sc->arge_cdata.arge_tx_cons;
	prod = sc->arge_cdata.arge_tx_prod;

	ARGEDEBUG(sc, ARGE_DBG_TX, "%s: cons=%d, prod=%d\n", __func__, cons, prod);

	/* Nothing queued: the ring is empty. */
	if (cons == prod)
		return;

	bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
	    sc->arge_cdata.arge_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	ifp = sc->arge_ifp;
	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (; cons != prod; ARGE_INC(cons, ARGE_TX_RING_COUNT)) {
		cur_tx = &sc->arge_rdata.arge_tx_ring[cons];
		ctrl = cur_tx->packet_ctrl;
		/* Check if descriptor has "finished" flag */
		if ((ctrl & ARGE_DESC_EMPTY) == 0)
			break;

		/* Acknowledge the per-packet TX completion to the hardware. */
		ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_PKT_SENT);

		sc->arge_cdata.arge_tx_cnt--;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		txd = &sc->arge_cdata.arge_txdesc[cons];

		ifp->if_opackets++;

		bus_dmamap_sync(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap);

		/* Free only if it's first descriptor in list */
		if (txd->tx_m)
			m_freem(txd->tx_m);
		txd->tx_m = NULL;

		/* reset descriptor */
		cur_tx->packet_addr = 0;
	}

	sc->arge_cdata.arge_tx_cons = cons;

	bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
	    sc->arge_cdata.arge_tx_ring_map, BUS_DMASYNC_PREWRITE);
}
1683
1684
/*
 * Harvest received frames: walk the RX ring from the consumer index,
 * pass each completed frame up the stack, then refill the consumed
 * descriptors with fresh clusters.  Returns the number of packets
 * delivered.  Must be called with the driver lock held; the lock is
 * dropped around if_input() while handing frames to the stack.
 */
static int
arge_rx_locked(struct arge_softc *sc)
{
	struct arge_rxdesc	*rxd;
	struct ifnet		*ifp = sc->arge_ifp;
	int			cons, prog, packet_len, i;
	struct arge_desc	*cur_rx;
	struct mbuf		*m;
	int			rx_npkts = 0;

	ARGE_LOCK_ASSERT(sc);

	cons = sc->arge_cdata.arge_rx_cons;

	bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
	    sc->arge_cdata.arge_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (prog = 0; prog < ARGE_RX_RING_COUNT;
	    ARGE_INC(cons, ARGE_RX_RING_COUNT)) {
		cur_rx = &sc->arge_rdata.arge_rx_ring[cons];
		rxd = &sc->arge_cdata.arge_rxdesc[cons];
		m = rxd->rx_m;

		/* EMPTY still set means the hardware hasn't filled it yet. */
		if ((cur_rx->packet_ctrl & ARGE_DESC_EMPTY) != 0)
		       break;

		/* Acknowledge the per-packet RX completion to the hardware. */
		ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_PKT_RECVD);

		prog++;

		packet_len = ARGE_DMASIZE(cur_rx->packet_ctrl);
		bus_dmamap_sync(sc->arge_cdata.arge_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		m = rxd->rx_m;

		arge_fixup_rx(m);
		m->m_pkthdr.rcvif = ifp;
		/* Skip 4 bytes of CRC */
		m->m_pkthdr.len = m->m_len = packet_len - ETHER_CRC_LEN;
		ifp->if_ipackets++;
		rx_npkts++;

		/* Drop the driver lock while the stack processes the frame. */
		ARGE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		ARGE_LOCK(sc);
		cur_rx->packet_addr = 0;
	}

	if (prog > 0) {

		/* Refill every descriptor consumed above. */
		i = sc->arge_cdata.arge_rx_cons;
		for (; prog > 0 ; prog--) {
			if (arge_newbuf(sc, i) != 0) {
				device_printf(sc->arge_dev,
				    "Failed to allocate buffer\n");
				break;
			}
			ARGE_INC(i, ARGE_RX_RING_COUNT);
		}

		bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
		    sc->arge_cdata.arge_rx_ring_map,
		    BUS_DMASYNC_PREWRITE);

		sc->arge_cdata.arge_rx_cons = cons;
	}

	return (rx_npkts);
}
1755
/*
 * Fast interrupt filter.  If any DMA interrupt bits are pending,
 * latch them into sc->arge_intr_status, mask further DMA interrupts
 * and defer the real work to the threaded handler (arge_intr(),
 * which re-enables interrupts when done).  Otherwise report a stray
 * interrupt.
 */
static int
arge_intr_filter(void *arg)
{
	struct arge_softc	*sc = arg;
	uint32_t		status, ints;

	status = ARGE_READ(sc, AR71XX_DMA_INTR_STATUS);
	ints = ARGE_READ(sc, AR71XX_DMA_INTR);

	ARGEDEBUG(sc, ARGE_DBG_INTR, "int mask(filter) = %b\n", ints,
	    "\20\10RX_BUS_ERROR\7RX_OVERFLOW\5RX_PKT_RCVD"
	    "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT");
	ARGEDEBUG(sc, ARGE_DBG_INTR, "status(filter) = %b\n", status,
	    "\20\10RX_BUS_ERROR\7RX_OVERFLOW\5RX_PKT_RCVD"
	    "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT");

	if (status & DMA_INTR_ALL) {
		/* Accumulate for arge_intr() and mask DMA interrupts. */
		sc->arge_intr_status |= status;
		ARGE_WRITE(sc, AR71XX_DMA_INTR, 0);
		return (FILTER_SCHEDULE_THREAD);
	}

	/* Not our interrupt. */
	sc->arge_intr_status = 0;
	return (FILTER_STRAY);
}
1781
1782static void
1783arge_intr(void *arg)
1784{
1785	struct arge_softc	*sc = arg;
1786	uint32_t		status;
1787	struct ifnet		*ifp = sc->arge_ifp;
1788
1789	status = ARGE_READ(sc, AR71XX_DMA_INTR_STATUS);
1790	status |= sc->arge_intr_status;
1791
1792	ARGEDEBUG(sc, ARGE_DBG_INTR, "int status(intr) = %b\n", status,
1793	    "\20\10\7RX_OVERFLOW\5RX_PKT_RCVD"
1794	    "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT");
1795
1796	/*
1797	 * Is it our interrupt at all?
1798	 */
1799	if (status == 0)
1800		return;
1801
1802	if (status & DMA_INTR_RX_BUS_ERROR) {
1803		ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_BUS_ERROR);
1804		device_printf(sc->arge_dev, "RX bus error");
1805		return;
1806	}
1807
1808	if (status & DMA_INTR_TX_BUS_ERROR) {
1809		ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_BUS_ERROR);
1810		device_printf(sc->arge_dev, "TX bus error");
1811		return;
1812	}
1813
1814	ARGE_LOCK(sc);
1815
1816	if (status & DMA_INTR_RX_PKT_RCVD)
1817		arge_rx_locked(sc);
1818
1819	/*
1820	 * RX overrun disables the receiver.
1821	 * Clear indication and re-enable rx.
1822	 */
1823	if ( status & DMA_INTR_RX_OVERFLOW) {
1824		ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_OVERFLOW);
1825		ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, DMA_RX_CONTROL_EN);
1826		sc->stats.rx_overflow++;
1827	}
1828
1829	if (status & DMA_INTR_TX_PKT_SENT)
1830		arge_tx_locked(sc);
1831	/*
1832	 * Underrun turns off TX. Clear underrun indication.
1833	 * If there's anything left in the ring, reactivate the tx.
1834	 */
1835	if (status & DMA_INTR_TX_UNDERRUN) {
1836		ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_UNDERRUN);
1837		sc->stats.tx_underflow++;
1838		ARGEDEBUG(sc, ARGE_DBG_TX, "%s: TX underrun; tx_cnt=%d\n", __func__, sc->arge_cdata.arge_tx_cnt);
1839		if (sc->arge_cdata.arge_tx_cnt > 0 ) {
1840			ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL,
1841			    DMA_TX_CONTROL_EN);
1842		}
1843	}
1844
1845	/*
1846	 * If we've finished TXing and there's space for more packets
1847	 * to be queued for TX, do so. Otherwise we may end up in a
1848	 * situation where the interface send queue was filled
1849	 * whilst the hardware queue was full, then the hardware
1850	 * queue was drained by the interface send queue wasn't,
1851	 * and thus if_start() is never called to kick-start
1852	 * the send process (and all subsequent packets are simply
1853	 * discarded.
1854	 *
1855	 * XXX TODO: make sure that the hardware deals nicely
1856	 * with the possibility of the queue being enabled above
1857	 * after a TX underrun, then having the hardware queue added
1858	 * to below.
1859	 */
1860	if (status & (DMA_INTR_TX_PKT_SENT | DMA_INTR_TX_UNDERRUN) &&
1861	    (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
1862		if (!IFQ_IS_EMPTY(&ifp->if_snd))
1863			arge_start_locked(ifp);
1864	}
1865
1866	/*
1867	 * We handled all bits, clear status
1868	 */
1869	sc->arge_intr_status = 0;
1870	ARGE_UNLOCK(sc);
1871	/*
1872	 * re-enable all interrupts
1873	 */
1874	ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL);
1875}
1876
1877
1878static void
1879arge_tick(void *xsc)
1880{
1881	struct arge_softc	*sc = xsc;
1882	struct mii_data		*mii;
1883
1884	ARGE_LOCK_ASSERT(sc);
1885
1886	if (sc->arge_miibus) {
1887		mii = device_get_softc(sc->arge_miibus);
1888		mii_tick(mii);
1889		callout_reset(&sc->arge_stat_callout, hz, arge_tick, sc);
1890	}
1891}
1892
1893int
1894arge_multiphy_mediachange(struct ifnet *ifp)
1895{
1896	struct arge_softc *sc = ifp->if_softc;
1897	struct ifmedia *ifm = &sc->arge_ifmedia;
1898	struct ifmedia_entry *ife = ifm->ifm_cur;
1899
1900	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1901		return (EINVAL);
1902
1903	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
1904		device_printf(sc->arge_dev,
1905		    "AUTO is not supported for multiphy MAC");
1906		return (EINVAL);
1907	}
1908
1909	/*
1910	 * Ignore everything
1911	 */
1912	return (0);
1913}
1914
1915void
1916arge_multiphy_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1917{
1918	struct arge_softc *sc = ifp->if_softc;
1919
1920	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
1921	ifmr->ifm_active = IFM_ETHER | sc->arge_media_type |
1922	    sc->arge_duplex_mode;
1923}
1924
1925