/* if_arge.c revision 220354 */
1/*-
2 * Copyright (c) 2009, Oleksandr Tymoshenko
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice unmodified, this list of conditions, and the following
10 *    disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28#include <sys/cdefs.h>
29__FBSDID("$FreeBSD: head/sys/mips/atheros/if_arge.c 220354 2011-04-05 05:15:48Z adrian $");
30
31/*
32 * AR71XX gigabit ethernet driver
33 */
34#ifdef HAVE_KERNEL_OPTION_HEADERS
35#include "opt_device_polling.h"
36#endif
37
38#include <sys/param.h>
39#include <sys/endian.h>
40#include <sys/systm.h>
41#include <sys/sockio.h>
42#include <sys/mbuf.h>
43#include <sys/malloc.h>
44#include <sys/kernel.h>
45#include <sys/module.h>
46#include <sys/socket.h>
47#include <sys/taskqueue.h>
48#include <sys/sysctl.h>
49
50#include <net/if.h>
51#include <net/if_arp.h>
52#include <net/ethernet.h>
53#include <net/if_dl.h>
54#include <net/if_media.h>
55#include <net/if_types.h>
56
57#include <net/bpf.h>
58
59#include <machine/bus.h>
60#include <machine/cache.h>
61#include <machine/resource.h>
62#include <vm/vm_param.h>
63#include <vm/vm.h>
64#include <vm/pmap.h>
65#include <machine/pmap.h>
66#include <sys/bus.h>
67#include <sys/rman.h>
68
69#include <dev/mii/mii.h>
70#include <dev/mii/miivar.h>
71
72#include <dev/pci/pcireg.h>
73#include <dev/pci/pcivar.h>
74
75MODULE_DEPEND(arge, ether, 1, 1, 1);
76MODULE_DEPEND(arge, miibus, 1, 1, 1);
77
78#include "miibus_if.h"
79
80#include <mips/atheros/ar71xxreg.h>
81#include <mips/atheros/if_argevar.h>
82#include <mips/atheros/ar71xx_setup.h>
83#include <mips/atheros/ar71xx_cpudef.h>
84
/*
 * Debug flag bits for sc->arge_debug (settable via the "debug" sysctl);
 * tested by the ARGEDEBUG() macro below.
 */
typedef enum {
	ARGE_DBG_MII 	=	0x00000001,
	ARGE_DBG_INTR	=	0x00000002
} arge_debug_flags;
89
/*
 * NOTE: the #undef below unconditionally disables ARGE_DEBUG, so all
 * ARGEDEBUG() invocations compile away to nothing.  Remove the #undef
 * (or define ARGE_DEBUG) to get the device_printf()s gated on the
 * sc->arge_debug flag mask.
 */
#undef ARGE_DEBUG
#ifdef ARGE_DEBUG
#define	ARGEDEBUG(_sc, _m, ...) 					\
	do {								\
		if ((_m) & (_sc)->arge_debug)				\
			device_printf((_sc)->arge_dev, __VA_ARGS__);	\
	} while (0)
#else
#define	ARGEDEBUG(_sc, _m, ...)
#endif
100
101static int arge_attach(device_t);
102static int arge_detach(device_t);
103static void arge_flush_ddr(struct arge_softc *);
104static int arge_ifmedia_upd(struct ifnet *);
105static void arge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
106static int arge_ioctl(struct ifnet *, u_long, caddr_t);
107static void arge_init(void *);
108static void arge_init_locked(struct arge_softc *);
109static void arge_link_task(void *, int);
110static void arge_set_pll(struct arge_softc *, int, int);
111static int arge_miibus_readreg(device_t, int, int);
112static void arge_miibus_statchg(device_t);
113static int arge_miibus_writereg(device_t, int, int, int);
114static int arge_probe(device_t);
115static void arge_reset_dma(struct arge_softc *);
116static int arge_resume(device_t);
117static int arge_rx_ring_init(struct arge_softc *);
118static int arge_tx_ring_init(struct arge_softc *);
119#ifdef DEVICE_POLLING
120static int arge_poll(struct ifnet *, enum poll_cmd, int);
121#endif
122static int arge_shutdown(device_t);
123static void arge_start(struct ifnet *);
124static void arge_start_locked(struct ifnet *);
125static void arge_stop(struct arge_softc *);
126static int arge_suspend(device_t);
127
128static int arge_rx_locked(struct arge_softc *);
129static void arge_tx_locked(struct arge_softc *);
130static void arge_intr(void *);
131static int arge_intr_filter(void *);
132static void arge_tick(void *);
133
134/*
135 * ifmedia callbacks for multiPHY MAC
136 */
137void arge_multiphy_mediastatus(struct ifnet *, struct ifmediareq *);
138int arge_multiphy_mediachange(struct ifnet *);
139
140static void arge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
141static int arge_dma_alloc(struct arge_softc *);
142static void arge_dma_free(struct arge_softc *);
143static int arge_newbuf(struct arge_softc *, int);
144static __inline void arge_fixup_rx(struct mbuf *);
145
/*
 * newbus method table: device life-cycle entry points plus the
 * MII register accessors used by the child miibus.
 */
static device_method_t arge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		arge_probe),
	DEVMETHOD(device_attach,	arge_attach),
	DEVMETHOD(device_detach,	arge_detach),
	DEVMETHOD(device_suspend,	arge_suspend),
	DEVMETHOD(device_resume,	arge_resume),
	DEVMETHOD(device_shutdown,	arge_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	arge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	arge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	arge_miibus_statchg),

	{ 0, 0 }
};
166
/* Driver glue: arge attaches directly to the nexus. */
static driver_t arge_driver = {
	"arge",
	arge_methods,
	sizeof(struct arge_softc)
};

static devclass_t arge_devclass;

DRIVER_MODULE(arge, nexus, arge_driver, arge_devclass, 0, 0);
DRIVER_MODULE(miibus, arge, miibus_driver, miibus_devclass, 0, 0);

/*
 * RedBoot passes the MAC address to the entry point as an environment
 * variable. platform_start() parses it and stores it in this variable.
 */
extern uint32_t ar711_base_mac[ETHER_ADDR_LEN];

/* File-scope lock serializing MII register access across arge instances. */
static struct mtx miibus_mtx;

MTX_SYSINIT(miibus_mtx, &miibus_mtx, "arge mii lock", MTX_DEF);
187
188
189/*
190 * Flushes all
191 */
192static void
193arge_flush_ddr(struct arge_softc *sc)
194{
195	if (sc->arge_mac_unit == 0)
196		ar71xx_device_flush_ddr_ge0();
197	else
198		ar71xx_device_flush_ddr_ge1();
199}
200
201static int
202arge_probe(device_t dev)
203{
204
205	device_set_desc(dev, "Atheros AR71xx built-in ethernet interface");
206	return (0);
207}
208
209static void
210arge_attach_sysctl(device_t dev)
211{
212	struct arge_softc *sc = device_get_softc(dev);
213	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
214	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
215
216	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
217		"debug", CTLFLAG_RW, &sc->arge_debug, 0,
218		"arge interface debugging flags");
219
220	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
221		"tx_pkts_aligned", CTLFLAG_RW, &sc->stats.tx_pkts_aligned, 0,
222		"number of TX aligned packets");
223
224	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
225		"tx_pkts_unaligned", CTLFLAG_RW, &sc->stats.tx_pkts_unaligned, 0,
226		"number of TX unaligned packets");
227
228	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_prod", CTLFLAG_RW, &sc->arge_cdata.arge_tx_prod, 0, "");
229	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_cons", CTLFLAG_RW, &sc->arge_cdata.arge_tx_cons, 0, "");
230	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_cnt", CTLFLAG_RW, &sc->arge_cdata.arge_tx_cnt, 0, "");
231}
232
/*
 * Attach method: read configuration hints, allocate bus resources,
 * reset and configure the MAC block, attach the PHY(s) and register
 * the ifnet.  On any failure, arge_detach() is used to unwind.
 */
static int
arge_attach(device_t dev)
{
	uint8_t			eaddr[ETHER_ADDR_LEN];
	struct ifnet		*ifp;
	struct arge_softc	*sc;
	int			error = 0, rid, phymask;
	uint32_t		reg, rnd;
	int			is_base_mac_empty, i, phys_total;
	uint32_t		hint;
	long			eeprom_mac_addr = 0;

	sc = device_get_softc(dev);
	sc->arge_dev = dev;
	sc->arge_mac_unit = device_get_unit(dev);

	/*
	 * Some units (eg the TP-Link WR-1043ND) do not have a convenient
	 * EEPROM location to read the ethernet MAC address from.
	 * OpenWRT simply snaffles it from a fixed location.
	 *
	 * Since multiple units seem to use this feature, include
	 * a method of setting the MAC address based on a flash location
	 * in CPU address space.
	 */
	if (sc->arge_mac_unit == 0 &&
	    resource_long_value(device_get_name(dev), device_get_unit(dev),
	    "eeprommac", &eeprom_mac_addr) == 0) {
		int i;
		/* Uncached KSEG1 view of the flash location. */
		const char *mac = (const char *) MIPS_PHYS_TO_KSEG1(eeprom_mac_addr);
		device_printf(dev, "Overriding MAC from EEPROM\n");
		for (i = 0; i < 6; i++) {
			ar711_base_mac[i] = mac[i];
		}
	}

	KASSERT(((sc->arge_mac_unit == 0) || (sc->arge_mac_unit == 1)),
	    ("if_arge: Only MAC0 and MAC1 supported"));

	/*
	 *  Get which PHY of 5 available we should use for this unit
	 */
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "phymask", &phymask) != 0) {
		/*
		 * Use port 4 (WAN) for GE0. For any other port use
		 * its PHY the same as its unit number
		 */
		if (sc->arge_mac_unit == 0)
			phymask = (1 << 4);
		else
			/* Use all phys up to 4 */
			phymask = (1 << 4) - 1;

		device_printf(dev, "No PHY specified, using mask %d\n", phymask);
	}

	/*
	 *  Get default media & duplex mode, by default its Base100T
	 *  and full duplex
	 */
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "media", &hint) != 0)
		hint = 0;

	if (hint == 1000)
		sc->arge_media_type = IFM_1000_T;
	else
		sc->arge_media_type = IFM_100_TX;

	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fduplex", &hint) != 0)
		hint = 1;

	if (hint)
		sc->arge_duplex_mode = IFM_FDX;
	else
		sc->arge_duplex_mode = 0;

	sc->arge_phymask = phymask;

	mtx_init(&sc->arge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->arge_stat_callout, &sc->arge_mtx, 0);
	TASK_INIT(&sc->arge_link_task, 0, arge_link_task, sc);

	/* Map control/status registers. */
	sc->arge_rid = 0;
	sc->arge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->arge_rid, RF_ACTIVE);

	if (sc->arge_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupts */
	rid = 0;
	sc->arge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->arge_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate ifnet structure. */
	ifp = sc->arge_ifp = if_alloc(IFT_ETHER);

	if (ifp == NULL) {
		device_printf(dev, "couldn't allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = arge_ioctl;
	ifp->if_start = arge_start;
	ifp->if_init = arge_init;
	sc->arge_if_flags = ifp->if_flags;

	/* XXX: add real size */
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * NOTE(review): if_capenable is snapshotted before IFCAP_POLLING is
	 * added below, so polling starts disabled and must be turned on via
	 * SIOCSIFCAP — looks intentional; confirm.
	 */
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	is_base_mac_empty = 1;
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		eaddr[i] = ar711_base_mac[i] & 0xff;
		if (eaddr[i] != 0)
			is_base_mac_empty = 0;
	}

	if (is_base_mac_empty) {
		/*
		 * No MAC address configured. Generate the random one.
		 */
		if  (bootverbose)
			device_printf(dev,
			    "Generating random ethernet address.\n");

		/* "bsd" prefix plus three random octets. */
		rnd = arc4random();
		eaddr[0] = 'b';
		eaddr[1] = 's';
		eaddr[2] = 'd';
		eaddr[3] = (rnd >> 24) & 0xff;
		eaddr[4] = (rnd >> 16) & 0xff;
		eaddr[5] = (rnd >> 8) & 0xff;
	}

	/* Give each MAC unit a distinct address by bumping the last octet. */
	if (sc->arge_mac_unit != 0)
		eaddr[5] +=  sc->arge_mac_unit;

	if (arge_dma_alloc(sc) != 0) {
		error = ENXIO;
		goto fail;
	}

	/* Initialize the MAC block */

	/* Step 1. Soft-reset MAC */
	ARGE_SET_BITS(sc, AR71XX_MAC_CFG1, MAC_CFG1_SOFT_RESET);
	DELAY(20);

	/* Step 2. Punt the MAC core from the central reset register */
	ar71xx_device_stop(sc->arge_mac_unit == 0 ? RST_RESET_GE0_MAC : RST_RESET_GE1_MAC);
	DELAY(100);
	ar71xx_device_start(sc->arge_mac_unit == 0 ? RST_RESET_GE0_MAC : RST_RESET_GE1_MAC);

	/* Step 3. Reconfigure MAC block */
	ARGE_WRITE(sc, AR71XX_MAC_CFG1,
		MAC_CFG1_SYNC_RX | MAC_CFG1_RX_ENABLE |
		MAC_CFG1_SYNC_TX | MAC_CFG1_TX_ENABLE);

	reg = ARGE_READ(sc, AR71XX_MAC_CFG2);
	reg |= MAC_CFG2_ENABLE_PADCRC | MAC_CFG2_LENGTH_FIELD ;
	ARGE_WRITE(sc, AR71XX_MAC_CFG2, reg);

	ARGE_WRITE(sc, AR71XX_MAC_MAX_FRAME_LEN, 1536);

	/* Reset MII bus */
	ARGE_WRITE(sc, AR71XX_MAC_MII_CFG, MAC_MII_CFG_RESET);
	DELAY(100);
	ARGE_WRITE(sc, AR71XX_MAC_MII_CFG, MAC_MII_CFG_CLOCK_DIV_28);
	DELAY(100);

	/*
	 * Program the station address registers from eaddr.
	 */
	ARGE_WRITE(sc, AR71XX_MAC_STA_ADDR1,
	    (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8)  | eaddr[5]);
	ARGE_WRITE(sc, AR71XX_MAC_STA_ADDR2, (eaddr[0] << 8) | eaddr[1]);

	ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG0,
	    FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT);

	/* FIFO thresholds differ between the AR724x family and the rest. */
	switch (ar71xx_soc) {
		case AR71XX_SOC_AR7240:
		case AR71XX_SOC_AR7241:
		case AR71XX_SOC_AR7242:
			ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG1, 0x0010ffff);
			ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG2, 0x015500aa);
			break;
		default:
			ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG1, 0x0fff0000);
			ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG2, 0x00001fff);
	}

	ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMATCH,
	    FIFO_RX_FILTMATCH_DEFAULT);

	ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMASK,
	    FIFO_RX_FILTMASK_DEFAULT);

	/*
	 * Check if we have single-PHY MAC or multi-PHY
	 */
	phys_total = 0;
	for (i = 0; i < ARGE_NPHY; i++)
		if (phymask & (1 << i))
			phys_total ++;

	if (phys_total == 0) {
		error = EINVAL;
		goto fail;
	}

	if (phys_total == 1) {
		/* Do MII setup. */
		error = mii_attach(dev, &sc->arge_miibus, ifp,
		    arge_ifmedia_upd, arge_ifmedia_sts, BMSR_DEFCAPMASK,
		    MII_PHY_ANY, MII_OFFSET_ANY, 0);
		if (error != 0) {
			device_printf(dev, "attaching PHYs failed\n");
			goto fail;
		}
	}
	else {
		/*
		 * Multi-PHY: no miibus; use a fixed ifmedia built from the
		 * media/fduplex hints and program the PLL directly.
		 */
		ifmedia_init(&sc->arge_ifmedia, 0,
		    arge_multiphy_mediachange,
		    arge_multiphy_mediastatus);
		ifmedia_add(&sc->arge_ifmedia,
		    IFM_ETHER | sc->arge_media_type  | sc->arge_duplex_mode,
		    0, NULL);
		ifmedia_set(&sc->arge_ifmedia,
		    IFM_ETHER | sc->arge_media_type  | sc->arge_duplex_mode);
		arge_set_pll(sc, sc->arge_media_type, sc->arge_duplex_mode);
	}

	/* Call MI attach routine. */
	ether_ifattach(ifp, eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->arge_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    arge_intr_filter, arge_intr, sc, &sc->arge_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	/* setup sysctl variables */
	arge_attach_sysctl(dev);

	/* The success path falls through here with error == 0. */
fail:
	if (error)
		arge_detach(dev);

	return (error);
}
514
/*
 * Detach method; also used by arge_attach() to unwind a partial
 * attach, so every resource is released only if it was acquired.
 */
static int
arge_detach(device_t dev)
{
	struct arge_softc	*sc = device_get_softc(dev);
	struct ifnet		*ifp = sc->arge_ifp;

	KASSERT(mtx_initialized(&sc->arge_mtx), ("arge mutex not initialized"));

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		ARGE_LOCK(sc);
		/* Tell arge_ioctl() not to re-init the device from here on. */
		sc->arge_detach = 1;
#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING)
			ether_poll_deregister(ifp);
#endif

		arge_stop(sc);
		ARGE_UNLOCK(sc);
		/* Drain the link task outside the lock before the ifnet goes. */
		taskqueue_drain(taskqueue_swi, &sc->arge_link_task);
		ether_ifdetach(ifp);
	}

	if (sc->arge_miibus)
		device_delete_child(dev, sc->arge_miibus);

	bus_generic_detach(dev);

	if (sc->arge_intrhand)
		bus_teardown_intr(dev, sc->arge_irq, sc->arge_intrhand);

	if (sc->arge_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->arge_rid,
		    sc->arge_res);

	if (ifp)
		if_free(ifp);

	arge_dma_free(sc);

	mtx_destroy(&sc->arge_mtx);

	return (0);

}
560
561static int
562arge_suspend(device_t dev)
563{
564
565	panic("%s", __func__);
566	return 0;
567}
568
569static int
570arge_resume(device_t dev)
571{
572
573	panic("%s", __func__);
574	return 0;
575}
576
/*
 * Shutdown method: quiesce the MAC (stop DMA, mask interrupts)
 * under the softc lock.
 */
static int
arge_shutdown(device_t dev)
{
	struct arge_softc	*sc;

	sc = device_get_softc(dev);

	ARGE_LOCK(sc);
	arge_stop(sc);
	ARGE_UNLOCK(sc);

	return (0);
}
590
/*
 * miibus read method: read PHY register 'reg' on PHY 'phy' through
 * the MAC's MDIO interface.  Returns the 16-bit register value, 0 if
 * the PHY is not in this unit's phymask, or -1 on timeout.
 */
static int
arge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct arge_softc * sc = device_get_softc(dev);
	int i, result;
	uint32_t addr = (phy << MAC_MII_PHY_ADDR_SHIFT)
	    | (reg & MAC_MII_REG_MASK);

	/* Ignore PHYs this unit doesn't own. */
	if ((sc->arge_phymask  & (1 << phy)) == 0)
		return (0);

	mtx_lock(&miibus_mtx);
	ARGE_MII_WRITE(AR71XX_MAC_MII_CMD, MAC_MII_CMD_WRITE);
	ARGE_MII_WRITE(AR71XX_MAC_MII_ADDR, addr);
	ARGE_MII_WRITE(AR71XX_MAC_MII_CMD, MAC_MII_CMD_READ);

	/* Poll until the MDIO engine is idle; i goes to -1 on timeout. */
	i = ARGE_MII_TIMEOUT;
	while ((ARGE_MII_READ(AR71XX_MAC_MII_INDICATOR) &
	    MAC_MII_INDICATOR_BUSY) && (i--))
		DELAY(5);

	if (i < 0) {
		mtx_unlock(&miibus_mtx);
		ARGEDEBUG(sc, ARGE_DBG_MII, "%s timedout\n", __func__);
		/* XXX: return ERRNO instead? */
		return (-1);
	}

	result = ARGE_MII_READ(AR71XX_MAC_MII_STATUS) & MAC_MII_STATUS_MASK;
	ARGE_MII_WRITE(AR71XX_MAC_MII_CMD, MAC_MII_CMD_WRITE);
	mtx_unlock(&miibus_mtx);

	ARGEDEBUG(sc, ARGE_DBG_MII, "%s: phy=%d, reg=%02x, value[%08x]=%04x\n", __func__,
		 phy, reg, addr, result);

	return (result);
}
628
/*
 * miibus write method: write 'data' to PHY register 'reg' on PHY
 * 'phy'.  Returns 0 on success, -1 if the PHY is not in this unit's
 * phymask or the MDIO engine timed out.
 */
static int
arge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct arge_softc * sc = device_get_softc(dev);
	int i;
	uint32_t addr =
	    (phy << MAC_MII_PHY_ADDR_SHIFT) | (reg & MAC_MII_REG_MASK);


	/* Ignore PHYs this unit doesn't own. */
	if ((sc->arge_phymask  & (1 << phy)) == 0)
		return (-1);

	ARGEDEBUG(sc, ARGE_DBG_MII, "%s: phy=%d, reg=%02x, value=%04x\n", __func__,
	    phy, reg, data);

	mtx_lock(&miibus_mtx);
	ARGE_MII_WRITE(AR71XX_MAC_MII_ADDR, addr);
	ARGE_MII_WRITE(AR71XX_MAC_MII_CONTROL, data);

	/* Poll until the MDIO engine is idle; i goes to -1 on timeout. */
	i = ARGE_MII_TIMEOUT;
	while ((ARGE_MII_READ(AR71XX_MAC_MII_INDICATOR) &
	    MAC_MII_INDICATOR_BUSY) && (i--))
		DELAY(5);

	mtx_unlock(&miibus_mtx);

	if (i < 0) {
		ARGEDEBUG(sc, ARGE_DBG_MII, "%s timedout\n", __func__);
		/* XXX: return ERRNO instead? */
		return (-1);
	}

	return (0);
}
663
664static void
665arge_miibus_statchg(device_t dev)
666{
667	struct arge_softc		*sc;
668
669	sc = device_get_softc(dev);
670	taskqueue_enqueue(taskqueue_swi, &sc->arge_link_task);
671}
672
/*
 * Taskqueue handler run after a PHY status change: update the
 * cached link state and, when the link is up with a known media
 * subtype, reprogram the MAC/PLL for the negotiated speed/duplex.
 */
static void
arge_link_task(void *arg, int pending)
{
	struct arge_softc	*sc;
	struct mii_data		*mii;
	struct ifnet		*ifp;
	uint32_t		media, duplex;

	sc = (struct arge_softc *)arg;

	ARGE_LOCK(sc);
	mii = device_get_softc(sc->arge_miibus);
	ifp = sc->arge_ifp;
	/* Bail if we're not fully up (or being torn down). */
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		ARGE_UNLOCK(sc);
		return;
	}

	if (mii->mii_media_status & IFM_ACTIVE) {

		media = IFM_SUBTYPE(mii->mii_media_active);

		if (media != IFM_NONE) {
			/* Link up: note it and retune for the new media. */
			sc->arge_link_status = 1;
			duplex = mii->mii_media_active & IFM_GMASK;
			arge_set_pll(sc, media, duplex);
		}
	} else
		sc->arge_link_status = 0;

	ARGE_UNLOCK(sc);
}
706
/*
 * Program the MAC interface mode, FIFO thresholds and the SoC PLL
 * for the given media subtype (IFM_10_T/100_TX/1000_T/1000_SX) and
 * duplex (IFM_FDX or 0).  Unknown media falls back to 100 Mbit.
 */
static void
arge_set_pll(struct arge_softc *sc, int media, int duplex)
{
	uint32_t		cfg, ifcontrol, rx_filtmask;
	uint32_t		fifo_tx;
	int if_speed;

	/* Start from CFG2 with the mode/duplex bits cleared. */
	cfg = ARGE_READ(sc, AR71XX_MAC_CFG2);
	cfg &= ~(MAC_CFG2_IFACE_MODE_1000
	    | MAC_CFG2_IFACE_MODE_10_100
	    | MAC_CFG2_FULL_DUPLEX);

	if (duplex == IFM_FDX)
		cfg |= MAC_CFG2_FULL_DUPLEX;

	ifcontrol = ARGE_READ(sc, AR71XX_MAC_IFCONTROL);
	ifcontrol &= ~MAC_IFCONTROL_SPEED;
	rx_filtmask =
	    ARGE_READ(sc, AR71XX_MAC_FIFO_RX_FILTMASK);
	rx_filtmask &= ~FIFO_RX_MASK_BYTE_MODE;

	switch(media) {
	case IFM_10_T:
		cfg |= MAC_CFG2_IFACE_MODE_10_100;
		if_speed = 10;
		break;
	case IFM_100_TX:
		cfg |= MAC_CFG2_IFACE_MODE_10_100;
		ifcontrol |= MAC_IFCONTROL_SPEED;
		if_speed = 100;
		break;
	case IFM_1000_T:
	case IFM_1000_SX:
		cfg |= MAC_CFG2_IFACE_MODE_1000;
		rx_filtmask |= FIFO_RX_MASK_BYTE_MODE;
		if_speed = 1000;
		break;
	default:
		/* Unknown media: warn and fall back to 100 Mbit settings. */
		if_speed = 100;
		device_printf(sc->arge_dev,
		    "Unknown media %d\n", media);
	}

	/* Per-SoC TX FIFO threshold values. */
	switch (ar71xx_soc) {
		case AR71XX_SOC_AR7240:
		case AR71XX_SOC_AR7241:
		case AR71XX_SOC_AR7242:
			fifo_tx = 0x01f00140;
			break;
		case AR71XX_SOC_AR9130:
		case AR71XX_SOC_AR9132:
			fifo_tx = 0x00780fff;
			break;
		default:
			fifo_tx = 0x008001ff;
	}

	ARGE_WRITE(sc, AR71XX_MAC_CFG2, cfg);
	ARGE_WRITE(sc, AR71XX_MAC_IFCONTROL, ifcontrol);
	ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMASK,
	    rx_filtmask);
	ARGE_WRITE(sc, AR71XX_MAC_FIFO_TX_THRESHOLD, fifo_tx);

	/* set PLL registers */
	if (sc->arge_mac_unit == 0)
		ar71xx_device_set_pll_ge0(if_speed);
	else
		ar71xx_device_set_pll_ge1(if_speed);
}
776
777
/*
 * Quiesce the DMA engine: disable RX/TX, clear the descriptor base
 * registers and acknowledge every pending status/interrupt bit.
 */
static void
arge_reset_dma(struct arge_softc *sc)
{
	ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, 0);
	ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, 0);

	ARGE_WRITE(sc, AR71XX_DMA_RX_DESC, 0);
	ARGE_WRITE(sc, AR71XX_DMA_TX_DESC, 0);

	/* Clear all possible RX interrupts (write-1-to-clear, per packet) */
	while(ARGE_READ(sc, AR71XX_DMA_RX_STATUS) & DMA_RX_STATUS_PKT_RECVD)
		ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_PKT_RECVD);

	/*
	 * Clear all possible TX interrupts
	 */
	while(ARGE_READ(sc, AR71XX_DMA_TX_STATUS) & DMA_TX_STATUS_PKT_SENT)
		ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_PKT_SENT);

	/*
	 * Now Rx/Tx errors
	 */
	ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS,
	    DMA_RX_STATUS_BUS_ERROR | DMA_RX_STATUS_OVERFLOW);
	ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS,
	    DMA_TX_STATUS_BUS_ERROR | DMA_TX_STATUS_UNDERRUN);
}
805
806
807
/*
 * if_init entry point: take the softc lock and run the locked
 * initialisation path.
 */
static void
arge_init(void *xsc)
{
	struct arge_softc	*sc;

	sc = xsc;
	ARGE_LOCK(sc);
	arge_init_locked(sc);
	ARGE_UNLOCK(sc);
}
817
/*
 * Bring the interface up (softc lock held): rebuild the RX/TX rings,
 * reset DMA, kick media negotiation (or assume link up in multi-PHY
 * mode), then enable RX DMA and interrupts.
 */
static void
arge_init_locked(struct arge_softc *sc)
{
	struct ifnet		*ifp = sc->arge_ifp;
	struct mii_data		*mii;

	ARGE_LOCK_ASSERT(sc);

	arge_stop(sc);

	/* Init circular RX list. */
	if (arge_rx_ring_init(sc) != 0) {
		device_printf(sc->arge_dev,
		    "initialization failed: no memory for rx buffers\n");
		arge_stop(sc);
		return;
	}

	/* Init tx descriptors. */
	arge_tx_ring_init(sc);

	arge_reset_dma(sc);


	if (sc->arge_miibus) {
		sc->arge_link_status = 0;
		mii = device_get_softc(sc->arge_miibus);
		mii_mediachg(mii);
	}
	else {
		/*
		 * Multi-PHY interface: there is no per-PHY link state,
		 * so the link is always considered up.
		 */
		sc->arge_link_status = 1;
	}

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* The stat/link tick is only useful when we have a miibus. */
	if (sc->arge_miibus)
		callout_reset(&sc->arge_stat_callout, hz, arge_tick, sc);

	/* Point the DMA engine at the start of both rings. */
	ARGE_WRITE(sc, AR71XX_DMA_TX_DESC, ARGE_TX_RING_ADDR(sc, 0));
	ARGE_WRITE(sc, AR71XX_DMA_RX_DESC, ARGE_RX_RING_ADDR(sc, 0));

	/* Start listening */
	ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, DMA_RX_CONTROL_EN);

	/* Enable interrupts */
	ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL);
}
869
870/*
871 * Return whether the mbuf chain is correctly aligned
872 * for the arge TX engine.
873 *
874 * The TX engine requires each fragment to be aligned to a
875 * 4 byte boundary and the size of each fragment except
876 * the last to be a multiple of 4 bytes.
877 */
878static int
879arge_mbuf_chain_is_tx_aligned(struct mbuf *m0)
880{
881	struct mbuf *m;
882
883	for (m = m0; m != NULL; m = m->m_next) {
884		if((mtod(m, intptr_t) & 3) != 0)
885			return 0;
886		if ((m->m_next != NULL) && ((m->m_len & 0x03) != 0))
887			return 0;
888	}
889	return 1;
890}
891
/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 *
 * Returns 0 on success; ENOBUFS when the chain can't be defragmented
 * or the ring is too full; EIO if the DMA load produced no segments.
 * On ENOBUFS from m_defrag or EIO the mbuf is consumed and *m_head is
 * set to NULL; on a full ring the caller keeps ownership of *m_head.
 */
static int
arge_encap(struct arge_softc *sc, struct mbuf **m_head)
{
	struct arge_txdesc	*txd;
	struct arge_desc	*desc, *prev_desc;
	bus_dma_segment_t	txsegs[ARGE_MAXFRAGS];
	int			error, i, nsegs, prod, prev_prod;
	struct mbuf		*m;

	ARGE_LOCK_ASSERT(sc);

	/*
	 * Fix mbuf chain: all fragments should be 4 byte aligned and
	 * a multiple of 4 bytes long; defragment into one buffer if not.
	 */
	m = *m_head;
	if (! arge_mbuf_chain_is_tx_aligned(m)) {
		sc->stats.tx_pkts_unaligned++;
		m = m_defrag(*m_head, M_DONTWAIT);
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
	} else
		sc->stats.tx_pkts_aligned++;

	prod = sc->arge_cdata.arge_tx_prod;
	txd = &sc->arge_cdata.arge_txdesc[prod];
	error = bus_dmamap_load_mbuf_sg(sc->arge_cdata.arge_tx_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);

	/* EFBIG shouldn't happen after the defrag above. */
	if (error == EFBIG) {
		panic("EFBIG");
	} else if (error != 0)
		return (error);

	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check number of available descriptors. */
	if (sc->arge_cdata.arge_tx_cnt + nsegs >= (ARGE_TX_RING_COUNT - 1)) {
		bus_dmamap_unload(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}

	txd->tx_m = *m_head;
	bus_dmamap_sync(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Make a list of descriptors for this packet. DMA controller will
	 * walk through it while arge_link is not zero.
	 */
	prev_prod = prod;
	desc = prev_desc = NULL;
	for (i = 0; i < nsegs; i++) {
		desc = &sc->arge_rdata.arge_tx_ring[prod];
		desc->packet_ctrl = ARGE_DMASIZE(txsegs[i].ds_len);

		if (txsegs[i].ds_addr & 3)
			panic("TX packet address unaligned\n");

		desc->packet_addr = txsegs[i].ds_addr;

		/* link with previous descriptor */
		if (prev_desc)
			prev_desc->packet_ctrl |= ARGE_DESC_MORE;

		sc->arge_cdata.arge_tx_cnt++;
		prev_desc = desc;
		ARGE_INC(prod, ARGE_TX_RING_COUNT);
	}

	/* Update producer index. */
	sc->arge_cdata.arge_tx_prod = prod;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
	    sc->arge_cdata.arge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Start transmitting */
	ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, DMA_TX_CONTROL_EN);
	return (0);
}
985
986static void
987arge_start(struct ifnet *ifp)
988{
989	struct arge_softc	 *sc;
990
991	sc = ifp->if_softc;
992
993	ARGE_LOCK(sc);
994	arge_start_locked(ifp);
995	ARGE_UNLOCK(sc);
996}
997
/*
 * Dequeue packets from the send queue and hand them to arge_encap()
 * until the queue is empty or the TX ring is (nearly) full.  Runs
 * with the softc lock held; does nothing when the interface is not
 * running or the link is down.
 */
static void
arge_start_locked(struct ifnet *ifp)
{
	struct arge_softc	*sc;
	struct mbuf		*m_head;
	int			enq;	/* enqueued count; currently unused after the loop */

	sc = ifp->if_softc;

	ARGE_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || sc->arge_link_status == 0 )
		return;

	arge_flush_ddr(sc);

	/* Leave two descriptors of slack in the ring. */
	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc->arge_cdata.arge_tx_cnt < ARGE_TX_RING_COUNT - 2; ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;


		/*
		 * Pack the data into the transmit ring.  On failure with
		 * the mbuf still intact, push it back and mark OACTIVE.
		 */
		if (arge_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}
}
1041
/*
 * Stop the interface (softc lock held): clear the running flags,
 * cancel the stat callout, mask interrupts and reset the DMA engine.
 */
static void
arge_stop(struct arge_softc *sc)
{
	struct ifnet	    *ifp;

	ARGE_LOCK_ASSERT(sc);

	ifp = sc->arge_ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	/* The callout is only armed when a miibus exists; see arge_init_locked(). */
	if (sc->arge_miibus)
		callout_stop(&sc->arge_stat_callout);

	/* mask out interrupts */
	ARGE_WRITE(sc, AR71XX_DMA_INTR, 0);

	arge_reset_dma(sc);
}
1059
1060
/*
 * if_ioctl entry point.  Handles up/down transitions, multicast
 * (currently a no-op), media ioctls (via miibus or the fixed
 * multi-PHY ifmedia) and DEVICE_POLLING capability toggling;
 * everything else goes to ether_ioctl().
 */
static int
arge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct arge_softc		*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii;
	int			error;
#ifdef DEVICE_POLLING
	int			mask;
#endif

	switch (command) {
	case SIOCSIFFLAGS:
		ARGE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if (((ifp->if_flags ^ sc->arge_if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
					/* XXX: handle promisc & multi flags */
				}

			} else {
				/* Don't re-init while detaching. */
				if (!sc->arge_detach)
					arge_init_locked(sc);
			}
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			arge_stop(sc);
		}
		sc->arge_if_flags = ifp->if_flags;
		ARGE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* XXX: implement SIOCDELMULTI */
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* Single-PHY units go through miibus; multi-PHY use ours. */
		if (sc->arge_miibus) {
			mii = device_get_softc(sc->arge_miibus);
			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		}
		else
			error = ifmedia_ioctl(ifp, ifr, &sc->arge_ifmedia, command);
		break;
	case SIOCSIFCAP:
		/* XXX: Check other capabilities */
#ifdef DEVICE_POLLING
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				/* Mask interrupts before switching to polling. */
				ARGE_WRITE(sc, AR71XX_DMA_INTR, 0);
				error = ether_poll_register(arge_poll, ifp);
				if (error)
					return error;
				ARGE_LOCK(sc);
				ifp->if_capenable |= IFCAP_POLLING;
				ARGE_UNLOCK(sc);
			} else {
				/* Re-enable interrupts when leaving polling. */
				ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL);
				error = ether_poll_deregister(ifp);
				ARGE_LOCK(sc);
				ifp->if_capenable &= ~IFCAP_POLLING;
				ARGE_UNLOCK(sc);
			}
		}
		error = 0;
		break;
#endif
	/*
	 * NOTE: without DEVICE_POLLING, SIOCSIFCAP deliberately falls
	 * through to the default case and is handled by ether_ioctl().
	 */
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
1139
/*
 * Set media options (single-PHY path; registered with mii_attach).
 * Resets each PHY when more than one MII instance exists, then
 * triggers a media change.
 */
static int
arge_ifmedia_upd(struct ifnet *ifp)
{
	struct arge_softc		*sc;
	struct mii_data		*mii;
	struct mii_softc	*miisc;
	int			error;

	sc = ifp->if_softc;
	ARGE_LOCK(sc);
	mii = device_get_softc(sc->arge_miibus);
	if (mii->mii_instance) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);
	ARGE_UNLOCK(sc);

	return (error);
}
1163
1164/*
1165 * Report current media status.
1166 */
1167static void
1168arge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1169{
1170	struct arge_softc		*sc = ifp->if_softc;
1171	struct mii_data		*mii;
1172
1173	mii = device_get_softc(sc->arge_miibus);
1174	ARGE_LOCK(sc);
1175	mii_pollstat(mii);
1176	ARGE_UNLOCK(sc);
1177	ifmr->ifm_active = mii->mii_media_active;
1178	ifmr->ifm_status = mii->mii_media_status;
1179}
1180
/* Context handed to arge_dmamap_cb() to return the loaded bus address. */
struct arge_dmamap_arg {
	bus_addr_t	arge_busaddr;
};

/*
 * bus_dmamap_load() callback: record the bus address of the first (and
 * only) segment.  On error the context is left untouched, so callers
 * that pre-zero arge_busaddr can also detect failure by a zero address.
 */
static void
arge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct arge_dmamap_arg	*ctx;

	if (error != 0)
		return;
	ctx = arg;
	ctx->arge_busaddr = segs[0].ds_addr;
}
1195
/*
 * Allocate every DMA resource the driver needs: a parent tag, tags and
 * wired memory for the Tx/Rx descriptor rings (loaded to obtain their
 * bus addresses), per-buffer tags and maps for Tx and Rx mbufs, and one
 * spare Rx map used by arge_newbuf() for safe buffer replacement.
 *
 * Returns 0 on success or a bus_dma error code.  On failure the
 * partially-created resources are NOT released here; the caller is
 * expected to invoke arge_dma_free(), which tolerates a partially
 * initialized softc.
 */
static int
arge_dma_alloc(struct arge_softc *sc)
{
	struct arge_dmamap_arg	ctx;
	struct arge_txdesc	*txd;
	struct arge_rxdesc	*rxd;
	int			error, i;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->arge_dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_parent_tag);
	if (error != 0) {
		device_printf(sc->arge_dev, "failed to create parent DMA tag\n");
		goto fail;
	}
	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(
	    sc->arge_cdata.arge_parent_tag,	/* parent */
	    ARGE_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ARGE_TX_DMA_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    ARGE_TX_DMA_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->arge_dev, "failed to create Tx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(
	    sc->arge_cdata.arge_parent_tag,	/* parent */
	    ARGE_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ARGE_RX_DMA_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    ARGE_RX_DMA_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->arge_dev, "failed to create Rx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Tx buffers: a frame may span up to ARGE_MAXFRAGS
	 * cluster-sized segments, each 4-byte aligned. */
	error = bus_dma_tag_create(
	    sc->arge_cdata.arge_parent_tag,	/* parent */
	    sizeof(uint32_t), 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * ARGE_MAXFRAGS,	/* maxsize */
	    ARGE_MAXFRAGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_tx_tag);
	if (error != 0) {
		device_printf(sc->arge_dev, "failed to create Tx DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	/*
	 * NOTE(review): nsegments is ARGE_MAXFRAGS even though maxsize ==
	 * maxsegsize == MCLBYTES, so only one segment can ever be returned
	 * (arge_newbuf() KASSERTs nsegs == 1).  Harmless, but 1 would
	 * express the intent — confirm before changing.
	 */
	error = bus_dma_tag_create(
	    sc->arge_cdata.arge_parent_tag,	/* parent */
	    ARGE_RX_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    ARGE_MAXFRAGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_rx_tag);
	if (error != 0) {
		device_printf(sc->arge_dev, "failed to create Rx DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->arge_cdata.arge_tx_ring_tag,
	    (void **)&sc->arge_rdata.arge_tx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->arge_cdata.arge_tx_ring_map);
	if (error != 0) {
		device_printf(sc->arge_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	/* Load the ring to learn its bus address via the callback. */
	ctx.arge_busaddr = 0;
	error = bus_dmamap_load(sc->arge_cdata.arge_tx_ring_tag,
	    sc->arge_cdata.arge_tx_ring_map, sc->arge_rdata.arge_tx_ring,
	    ARGE_TX_DMA_SIZE, arge_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.arge_busaddr == 0) {
		device_printf(sc->arge_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->arge_rdata.arge_tx_ring_paddr = ctx.arge_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->arge_cdata.arge_rx_ring_tag,
	    (void **)&sc->arge_rdata.arge_rx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->arge_cdata.arge_rx_ring_map);
	if (error != 0) {
		device_printf(sc->arge_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.arge_busaddr = 0;
	error = bus_dmamap_load(sc->arge_cdata.arge_rx_ring_tag,
	    sc->arge_cdata.arge_rx_ring_map, sc->arge_rdata.arge_rx_ring,
	    ARGE_RX_DMA_SIZE, arge_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.arge_busaddr == 0) {
		device_printf(sc->arge_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->arge_rdata.arge_rx_ring_paddr = ctx.arge_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
		txd = &sc->arge_cdata.arge_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->arge_cdata.arge_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->arge_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers.  The spare map is swapped with a
	 * slot's map on each successful refill in arge_newbuf(). */
	if ((error = bus_dmamap_create(sc->arge_cdata.arge_rx_tag, 0,
	    &sc->arge_cdata.arge_rx_sparemap)) != 0) {
		device_printf(sc->arge_dev,
		    "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
		rxd = &sc->arge_cdata.arge_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->arge_cdata.arge_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->arge_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	/* No local unwinding: caller releases via arge_dma_free(). */
	return (error);
}
1371
1372static void
1373arge_dma_free(struct arge_softc *sc)
1374{
1375	struct arge_txdesc	*txd;
1376	struct arge_rxdesc	*rxd;
1377	int			i;
1378
1379	/* Tx ring. */
1380	if (sc->arge_cdata.arge_tx_ring_tag) {
1381		if (sc->arge_cdata.arge_tx_ring_map)
1382			bus_dmamap_unload(sc->arge_cdata.arge_tx_ring_tag,
1383			    sc->arge_cdata.arge_tx_ring_map);
1384		if (sc->arge_cdata.arge_tx_ring_map &&
1385		    sc->arge_rdata.arge_tx_ring)
1386			bus_dmamem_free(sc->arge_cdata.arge_tx_ring_tag,
1387			    sc->arge_rdata.arge_tx_ring,
1388			    sc->arge_cdata.arge_tx_ring_map);
1389		sc->arge_rdata.arge_tx_ring = NULL;
1390		sc->arge_cdata.arge_tx_ring_map = NULL;
1391		bus_dma_tag_destroy(sc->arge_cdata.arge_tx_ring_tag);
1392		sc->arge_cdata.arge_tx_ring_tag = NULL;
1393	}
1394	/* Rx ring. */
1395	if (sc->arge_cdata.arge_rx_ring_tag) {
1396		if (sc->arge_cdata.arge_rx_ring_map)
1397			bus_dmamap_unload(sc->arge_cdata.arge_rx_ring_tag,
1398			    sc->arge_cdata.arge_rx_ring_map);
1399		if (sc->arge_cdata.arge_rx_ring_map &&
1400		    sc->arge_rdata.arge_rx_ring)
1401			bus_dmamem_free(sc->arge_cdata.arge_rx_ring_tag,
1402			    sc->arge_rdata.arge_rx_ring,
1403			    sc->arge_cdata.arge_rx_ring_map);
1404		sc->arge_rdata.arge_rx_ring = NULL;
1405		sc->arge_cdata.arge_rx_ring_map = NULL;
1406		bus_dma_tag_destroy(sc->arge_cdata.arge_rx_ring_tag);
1407		sc->arge_cdata.arge_rx_ring_tag = NULL;
1408	}
1409	/* Tx buffers. */
1410	if (sc->arge_cdata.arge_tx_tag) {
1411		for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
1412			txd = &sc->arge_cdata.arge_txdesc[i];
1413			if (txd->tx_dmamap) {
1414				bus_dmamap_destroy(sc->arge_cdata.arge_tx_tag,
1415				    txd->tx_dmamap);
1416				txd->tx_dmamap = NULL;
1417			}
1418		}
1419		bus_dma_tag_destroy(sc->arge_cdata.arge_tx_tag);
1420		sc->arge_cdata.arge_tx_tag = NULL;
1421	}
1422	/* Rx buffers. */
1423	if (sc->arge_cdata.arge_rx_tag) {
1424		for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
1425			rxd = &sc->arge_cdata.arge_rxdesc[i];
1426			if (rxd->rx_dmamap) {
1427				bus_dmamap_destroy(sc->arge_cdata.arge_rx_tag,
1428				    rxd->rx_dmamap);
1429				rxd->rx_dmamap = NULL;
1430			}
1431		}
1432		if (sc->arge_cdata.arge_rx_sparemap) {
1433			bus_dmamap_destroy(sc->arge_cdata.arge_rx_tag,
1434			    sc->arge_cdata.arge_rx_sparemap);
1435			sc->arge_cdata.arge_rx_sparemap = 0;
1436		}
1437		bus_dma_tag_destroy(sc->arge_cdata.arge_rx_tag);
1438		sc->arge_cdata.arge_rx_tag = NULL;
1439	}
1440
1441	if (sc->arge_cdata.arge_parent_tag) {
1442		bus_dma_tag_destroy(sc->arge_cdata.arge_parent_tag);
1443		sc->arge_cdata.arge_parent_tag = NULL;
1444	}
1445}
1446
1447/*
1448 * Initialize the transmit descriptors.
1449 */
1450static int
1451arge_tx_ring_init(struct arge_softc *sc)
1452{
1453	struct arge_ring_data	*rd;
1454	struct arge_txdesc	*txd;
1455	bus_addr_t		addr;
1456	int			i;
1457
1458	sc->arge_cdata.arge_tx_prod = 0;
1459	sc->arge_cdata.arge_tx_cons = 0;
1460	sc->arge_cdata.arge_tx_cnt = 0;
1461
1462	rd = &sc->arge_rdata;
1463	bzero(rd->arge_tx_ring, sizeof(rd->arge_tx_ring));
1464	for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
1465		if (i == ARGE_TX_RING_COUNT - 1)
1466			addr = ARGE_TX_RING_ADDR(sc, 0);
1467		else
1468			addr = ARGE_TX_RING_ADDR(sc, i + 1);
1469		rd->arge_tx_ring[i].packet_ctrl = ARGE_DESC_EMPTY;
1470		rd->arge_tx_ring[i].next_desc = addr;
1471		txd = &sc->arge_cdata.arge_txdesc[i];
1472		txd->tx_m = NULL;
1473	}
1474
1475	bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
1476	    sc->arge_cdata.arge_tx_ring_map,
1477	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1478
1479	return (0);
1480}
1481
1482/*
1483 * Initialize the RX descriptors and allocate mbufs for them. Note that
1484 * we arrange the descriptors in a closed ring, so that the last descriptor
1485 * points back to the first.
1486 */
1487static int
1488arge_rx_ring_init(struct arge_softc *sc)
1489{
1490	struct arge_ring_data	*rd;
1491	struct arge_rxdesc	*rxd;
1492	bus_addr_t		addr;
1493	int			i;
1494
1495	sc->arge_cdata.arge_rx_cons = 0;
1496
1497	rd = &sc->arge_rdata;
1498	bzero(rd->arge_rx_ring, sizeof(rd->arge_rx_ring));
1499	for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
1500		rxd = &sc->arge_cdata.arge_rxdesc[i];
1501		rxd->rx_m = NULL;
1502		rxd->desc = &rd->arge_rx_ring[i];
1503		if (i == ARGE_RX_RING_COUNT - 1)
1504			addr = ARGE_RX_RING_ADDR(sc, 0);
1505		else
1506			addr = ARGE_RX_RING_ADDR(sc, i + 1);
1507		rd->arge_rx_ring[i].next_desc = addr;
1508		if (arge_newbuf(sc, i) != 0) {
1509			return (ENOBUFS);
1510		}
1511	}
1512
1513	bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
1514	    sc->arge_cdata.arge_rx_ring_map,
1515	    BUS_DMASYNC_PREWRITE);
1516
1517	return (0);
1518}
1519
1520/*
1521 * Initialize an RX descriptor and attach an MBUF cluster.
1522 */
static int
arge_newbuf(struct arge_softc *sc, int idx)
{
	struct arge_desc		*desc;
	struct arge_rxdesc	*rxd;
	struct mbuf		*m;
	bus_dma_segment_t	segs[1];
	bus_dmamap_t		map;
	int			nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	/* Trim 8 bytes from the head of the cluster before mapping;
	 * arge_fixup_rx() later shifts data back by ETHER_ALIGN. */
	m_adj(m, sizeof(uint64_t));

	/*
	 * Load the new cluster into the spare map first: if the load
	 * fails, the slot's current buffer and mapping stay intact.
	 */
	if (bus_dmamap_load_mbuf_sg(sc->arge_cdata.arge_rx_tag,
	    sc->arge_cdata.arge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->arge_cdata.arge_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_unload(sc->arge_cdata.arge_rx_tag, rxd->rx_dmamap);
	}
	/* Swap the slot's (now unloaded) map with the loaded spare map. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->arge_cdata.arge_rx_sparemap;
	sc->arge_cdata.arge_rx_sparemap = map;
	rxd->rx_m = m;
	desc = rxd->desc;
	/* The descriptor's packet_addr must be 4-byte aligned. */
	if (segs[0].ds_addr & 3)
		panic("RX packet address unaligned");
	desc->packet_addr = segs[0].ds_addr;
	/* Hand the descriptor back to the MAC: empty + buffer length. */
	desc->packet_ctrl = ARGE_DESC_EMPTY | ARGE_DMASIZE(segs[0].ds_len);

	/* Push the descriptor update out before the MAC can see it. */
	bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
	    sc->arge_cdata.arge_rx_ring_map,
	    BUS_DMASYNC_PREWRITE);

	return (0);
}
1566
1567static __inline void
1568arge_fixup_rx(struct mbuf *m)
1569{
1570	int		i;
1571	uint16_t	*src, *dst;
1572
1573	src = mtod(m, uint16_t *);
1574	dst = src - 1;
1575
1576	for (i = 0; i < m->m_len / sizeof(uint16_t); i++) {
1577		*dst++ = *src++;
1578	}
1579
1580	if (m->m_len % sizeof(uint16_t))
1581		*(uint8_t *)dst = *(uint8_t *)src;
1582
1583	m->m_data -= ETHER_ALIGN;
1584}
1585
1586#ifdef DEVICE_POLLING
1587static int
1588arge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1589{
1590	struct arge_softc *sc = ifp->if_softc;
1591	int rx_npkts = 0;
1592
1593	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1594		ARGE_LOCK(sc);
1595		arge_tx_locked(sc);
1596		rx_npkts = arge_rx_locked(sc);
1597		ARGE_UNLOCK(sc);
1598	}
1599
1600	return (rx_npkts);
1601}
1602#endif /* DEVICE_POLLING */
1603
1604
/*
 * Reclaim completed Tx descriptors: walk from the consumer index toward
 * the producer, freeing mbufs and unloading DMA maps for every
 * descriptor the MAC has marked empty again.  Must be called with the
 * driver lock held.
 */
static void
arge_tx_locked(struct arge_softc *sc)
{
	struct arge_txdesc	*txd;
	struct arge_desc	*cur_tx;
	struct ifnet		*ifp;
	uint32_t		ctrl;
	int			cons, prod;

	ARGE_LOCK_ASSERT(sc);

	cons = sc->arge_cdata.arge_tx_cons;
	prod = sc->arge_cdata.arge_tx_prod;
	/* Ring empty — nothing in flight. */
	if (cons == prod)
		return;

	/* Pull the ring back from the device before reading it. */
	bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
	    sc->arge_cdata.arge_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	ifp = sc->arge_ifp;
	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (; cons != prod; ARGE_INC(cons, ARGE_TX_RING_COUNT)) {
		cur_tx = &sc->arge_rdata.arge_tx_ring[cons];
		ctrl = cur_tx->packet_ctrl;
		/* Check if descriptor has "finished" flag */
		if ((ctrl & ARGE_DESC_EMPTY) == 0)
			break;

		/* Acknowledge one sent-packet event per descriptor. */
		ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_PKT_SENT);

		sc->arge_cdata.arge_tx_cnt--;
		/* Ring space was freed; allow transmit again. */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		txd = &sc->arge_cdata.arge_txdesc[cons];

		ifp->if_opackets++;

		bus_dmamap_sync(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap);

		/* Free only if it's first descriptor in list */
		if (txd->tx_m)
			m_freem(txd->tx_m);
		txd->tx_m = NULL;

		/* reset descriptor */
		cur_tx->packet_addr = 0;
	}

	sc->arge_cdata.arge_tx_cons = cons;

	/* Write the cleared descriptors back out. */
	bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
	    sc->arge_cdata.arge_tx_ring_map, BUS_DMASYNC_PREWRITE);
}
1664
1665
/*
 * Harvest received frames from the Rx ring and pass them up the stack,
 * then replenish the consumed slots with fresh clusters.  Called with
 * the driver lock held; the lock is dropped around if_input().
 * Returns the number of packets received (for polling).
 */
static int
arge_rx_locked(struct arge_softc *sc)
{
	struct arge_rxdesc	*rxd;
	struct ifnet		*ifp = sc->arge_ifp;
	int			cons, prog, packet_len, i;
	struct arge_desc	*cur_rx;
	struct mbuf		*m;
	int			rx_npkts = 0;

	ARGE_LOCK_ASSERT(sc);

	cons = sc->arge_cdata.arge_rx_cons;

	/* Pull the descriptor ring back from the device. */
	bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
	    sc->arge_cdata.arge_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (prog = 0; prog < ARGE_RX_RING_COUNT;
	    ARGE_INC(cons, ARGE_RX_RING_COUNT)) {
		cur_rx = &sc->arge_rdata.arge_rx_ring[cons];
		rxd = &sc->arge_cdata.arge_rxdesc[cons];
		/* NOTE(review): m is re-read below after the dmamap sync;
		 * this first assignment is redundant. */
		m = rxd->rx_m;

		/* Descriptor still owned by the MAC — stop here. */
		if ((cur_rx->packet_ctrl & ARGE_DESC_EMPTY) != 0)
		       break;

		/* Acknowledge one received-packet event. */
		ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_PKT_RECVD);

		prog++;

		packet_len = ARGE_DMASIZE(cur_rx->packet_ctrl);
		bus_dmamap_sync(sc->arge_cdata.arge_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		m = rxd->rx_m;

		arge_fixup_rx(m);
		m->m_pkthdr.rcvif = ifp;
		/* Skip 4 bytes of CRC */
		m->m_pkthdr.len = m->m_len = packet_len - ETHER_CRC_LEN;
		ifp->if_ipackets++;
		rx_npkts++;

		/* Drop the lock while handing the packet to the stack. */
		ARGE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		ARGE_LOCK(sc);
		cur_rx->packet_addr = 0;
	}

	if (prog > 0) {

		/* Refill every slot we consumed, starting at the old cons. */
		i = sc->arge_cdata.arge_rx_cons;
		for (; prog > 0 ; prog--) {
			if (arge_newbuf(sc, i) != 0) {
				device_printf(sc->arge_dev,
				    "Failed to allocate buffer\n");
				break;
			}
			ARGE_INC(i, ARGE_RX_RING_COUNT);
		}

		bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
		    sc->arge_cdata.arge_rx_ring_map,
		    BUS_DMASYNC_PREWRITE);

		sc->arge_cdata.arge_rx_cons = cons;
	}

	return (rx_npkts);
}
1736
1737static int
1738arge_intr_filter(void *arg)
1739{
1740	struct arge_softc	*sc = arg;
1741	uint32_t		status, ints;
1742
1743	status = ARGE_READ(sc, AR71XX_DMA_INTR_STATUS);
1744	ints = ARGE_READ(sc, AR71XX_DMA_INTR);
1745
1746	ARGEDEBUG(sc, ARGE_DBG_INTR, "int mask(filter) = %b\n", ints,
1747	    "\20\10RX_BUS_ERROR\7RX_OVERFLOW\5RX_PKT_RCVD"
1748	    "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT");
1749	ARGEDEBUG(sc, ARGE_DBG_INTR, "status(filter) = %b\n", status,
1750	    "\20\10RX_BUS_ERROR\7RX_OVERFLOW\5RX_PKT_RCVD"
1751	    "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT");
1752
1753	if (status & DMA_INTR_ALL) {
1754		sc->arge_intr_status |= status;
1755		ARGE_WRITE(sc, AR71XX_DMA_INTR, 0);
1756		return (FILTER_SCHEDULE_THREAD);
1757	}
1758
1759	sc->arge_intr_status = 0;
1760	return (FILTER_STRAY);
1761}
1762
1763static void
1764arge_intr(void *arg)
1765{
1766	struct arge_softc	*sc = arg;
1767	uint32_t		status;
1768
1769	status = ARGE_READ(sc, AR71XX_DMA_INTR_STATUS);
1770	status |= sc->arge_intr_status;
1771
1772	ARGEDEBUG(sc, ARGE_DBG_INTR, "int status(intr) = %b\n", status,
1773	    "\20\10\7RX_OVERFLOW\5RX_PKT_RCVD"
1774	    "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT");
1775
1776	/*
1777	 * Is it our interrupt at all?
1778	 */
1779	if (status == 0)
1780		return;
1781
1782	if (status & DMA_INTR_RX_BUS_ERROR) {
1783		ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_BUS_ERROR);
1784		device_printf(sc->arge_dev, "RX bus error");
1785		return;
1786	}
1787
1788	if (status & DMA_INTR_TX_BUS_ERROR) {
1789		ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_BUS_ERROR);
1790		device_printf(sc->arge_dev, "TX bus error");
1791		return;
1792	}
1793
1794	ARGE_LOCK(sc);
1795
1796	if (status & DMA_INTR_RX_PKT_RCVD)
1797		arge_rx_locked(sc);
1798
1799	/*
1800	 * RX overrun disables the receiver.
1801	 * Clear indication and re-enable rx.
1802	 */
1803	if ( status & DMA_INTR_RX_OVERFLOW) {
1804		ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_OVERFLOW);
1805		ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, DMA_RX_CONTROL_EN);
1806	}
1807
1808	if (status & DMA_INTR_TX_PKT_SENT)
1809		arge_tx_locked(sc);
1810	/*
1811	 * Underrun turns off TX. Clear underrun indication.
1812	 * If there's anything left in the ring, reactivate the tx.
1813	 */
1814	if (status & DMA_INTR_TX_UNDERRUN) {
1815		ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_UNDERRUN);
1816		if (sc->arge_cdata.arge_tx_cnt > 0 ) {
1817			ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL,
1818			    DMA_TX_CONTROL_EN);
1819		}
1820	}
1821
1822	/*
1823	 * We handled all bits, clear status
1824	 */
1825	sc->arge_intr_status = 0;
1826	ARGE_UNLOCK(sc);
1827	/*
1828	 * re-enable all interrupts
1829	 */
1830	ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL);
1831}
1832
1833
1834static void
1835arge_tick(void *xsc)
1836{
1837	struct arge_softc	*sc = xsc;
1838	struct mii_data		*mii;
1839
1840	ARGE_LOCK_ASSERT(sc);
1841
1842	if (sc->arge_miibus) {
1843		mii = device_get_softc(sc->arge_miibus);
1844		mii_tick(mii);
1845		callout_reset(&sc->arge_stat_callout, hz, arge_tick, sc);
1846	}
1847}
1848
1849int
1850arge_multiphy_mediachange(struct ifnet *ifp)
1851{
1852	struct arge_softc *sc = ifp->if_softc;
1853	struct ifmedia *ifm = &sc->arge_ifmedia;
1854	struct ifmedia_entry *ife = ifm->ifm_cur;
1855
1856	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1857		return (EINVAL);
1858
1859	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
1860		device_printf(sc->arge_dev,
1861		    "AUTO is not supported for multiphy MAC");
1862		return (EINVAL);
1863	}
1864
1865	/*
1866	 * Ignore everything
1867	 */
1868	return (0);
1869}
1870
1871void
1872arge_multiphy_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1873{
1874	struct arge_softc *sc = ifp->if_softc;
1875
1876	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
1877	ifmr->ifm_active = IFM_ETHER | sc->arge_media_type |
1878	    sc->arge_duplex_mode;
1879}
1880
1881