/*-
 * Copyright (c) 2009, Oleksandr Tymoshenko
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * AR71XX gigabit ethernet driver
 */
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/cache.h>
#include <machine/resource.h>
#include <vm/vm_param.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/pmap.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

MODULE_DEPEND(arge, ether, 1, 1, 1);
MODULE_DEPEND(arge, miibus, 1, 1, 1);

#include "miibus_if.h"

#include <mips/atheros/ar71xxreg.h>
#include <mips/atheros/if_argevar.h>

#undef ARGE_DEBUG
#ifdef ARGE_DEBUG
#define dprintf printf
#else
#define dprintf(x, arg...)
#endif

static int arge_attach(device_t);
static int arge_detach(device_t);
static void arge_flush_ddr(struct arge_softc *);
static int arge_ifmedia_upd(struct ifnet *);
static void arge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int arge_ioctl(struct ifnet *, u_long, caddr_t);
static void arge_init(void *);
static void arge_init_locked(struct arge_softc *);
static void arge_link_task(void *, int);
static int arge_miibus_readreg(device_t, int, int);
static void arge_miibus_statchg(device_t);
static int arge_miibus_writereg(device_t, int, int, int);
static int arge_probe(device_t);
static void arge_reset_dma(struct arge_softc *);
static int arge_resume(device_t);
static int arge_rx_ring_init(struct arge_softc *);
static int arge_tx_ring_init(struct arge_softc *);
#ifdef DEVICE_POLLING
static int arge_poll(struct ifnet *, enum poll_cmd, int);
#endif
static int arge_shutdown(device_t);
static void arge_start(struct ifnet *);
static void arge_start_locked(struct ifnet *);
static void arge_stop(struct arge_softc *);
static int arge_suspend(device_t);

static int arge_rx_locked(struct arge_softc *);
static void arge_tx_locked(struct arge_softc *);
static void arge_intr(void *);
static int arge_intr_filter(void *);
static void arge_tick(void *);

static void arge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int arge_dma_alloc(struct arge_softc *);
static void arge_dma_free(struct arge_softc *);
static int arge_newbuf(struct arge_softc *, int);
static __inline void arge_fixup_rx(struct mbuf *);

static device_method_t arge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		arge_probe),
	DEVMETHOD(device_attach,	arge_attach),
	DEVMETHOD(device_detach,	arge_detach),
	DEVMETHOD(device_suspend,	arge_suspend),
	DEVMETHOD(device_resume,	arge_resume),
	DEVMETHOD(device_shutdown,	arge_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	arge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	arge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	arge_miibus_statchg),

	{ 0, 0 }
};

static driver_t arge_driver = {
	"arge",
	arge_methods,
	sizeof(struct arge_softc)
};

static devclass_t arge_devclass;

DRIVER_MODULE(arge, nexus, arge_driver, arge_devclass, 0, 0);
DRIVER_MODULE(miibus, arge, miibus_driver, miibus_devclass, 0, 0);

/*
 * RedBoot passes the MAC address to the kernel entry point as an
 * environment variable; platform_start() parses it and stores it here.
 */
extern uint32_t ar711_base_mac[ETHER_ADDR_LEN];

/*
 * Flush the DDR write buffer for this MAC.
 */
static void
arge_flush_ddr(struct arge_softc *sc)
{

	ATH_WRITE_REG(sc->arge_ddr_flush_reg, 1);
	while (ATH_READ_REG(sc->arge_ddr_flush_reg) & 1)
		;

	ATH_WRITE_REG(sc->arge_ddr_flush_reg, 1);
	while (ATH_READ_REG(sc->arge_ddr_flush_reg) & 1)
		;
}

static int
arge_probe(device_t dev)
{

	device_set_desc(dev, "Atheros AR71xx built-in ethernet interface");
	return (0);
}

static int
arge_attach(device_t dev)
{
	uint8_t			eaddr[ETHER_ADDR_LEN];
	struct ifnet		*ifp;
	struct arge_softc	*sc;
	int			error = 0, rid, phynum;
	uint32_t		reg, rnd;
	int			is_base_mac_empty, i;

	sc = device_get_softc(dev);
	sc->arge_dev = dev;
	sc->arge_mac_unit = device_get_unit(dev);

	KASSERT(((sc->arge_mac_unit == 0) || (sc->arge_mac_unit == 1)),
	    ("if_arge: Only MAC0 and MAC1 supported"));
	if (sc->arge_mac_unit == 0) {
		sc->arge_ddr_flush_reg = AR71XX_WB_FLUSH_GE0;
		sc->arge_pll_reg = AR71XX_PLL_ETH_INT0_CLK;
		sc->arge_pll_reg_shift = 17;
	} else {
		sc->arge_ddr_flush_reg = AR71XX_WB_FLUSH_GE1;
		sc->arge_pll_reg = AR71XX_PLL_ETH_INT1_CLK;
		sc->arge_pll_reg_shift = 19;
	}

	/*
	 * Get which of the 5 available PHYs we should use for this unit.
	 */
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "phy", &phynum) != 0) {
		/*
		 * Use port 4 (WAN) for GE0. For any other MAC unit, use
		 * the PHY whose number matches the unit number.
		 */
		if (sc->arge_mac_unit == 0)
			phynum = 4;
		else
			phynum = sc->arge_mac_unit;

		device_printf(dev, "No PHY specified, using %d\n", phynum);
	}

	sc->arge_phy_num = phynum;

	mtx_init(&sc->arge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->arge_stat_callout, &sc->arge_mtx, 0);
	TASK_INIT(&sc->arge_link_task, 0, arge_link_task, sc);

	/* Map control/status registers. */
	sc->arge_rid = 0;
	sc->arge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->arge_rid, RF_ACTIVE);

	if (sc->arge_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupts */
	rid = 0;
	sc->arge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->arge_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate ifnet structure. */
	ifp = sc->arge_ifp = if_alloc(IFT_ETHER);

	if (ifp == NULL) {
		device_printf(dev, "couldn't allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = arge_ioctl;
	ifp->if_start = arge_start;
	ifp->if_init = arge_init;
	sc->arge_if_flags = ifp->if_flags;

	/* XXX: add real size */
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	is_base_mac_empty = 1;
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		eaddr[i] = ar711_base_mac[i] & 0xff;
		if (eaddr[i] != 0)
			is_base_mac_empty = 0;
	}

	if (is_base_mac_empty) {
		/*
		 * No MAC address configured. Generate a random one.
		 */
		if (bootverbose)
			device_printf(dev,
			    "Generating random ethernet address.\n");

		rnd = arc4random();
		eaddr[0] = 'b';
		eaddr[1] = 's';
		eaddr[2] = 'd';
		eaddr[3] = (rnd >> 24) & 0xff;
		eaddr[4] = (rnd >> 16) & 0xff;
		eaddr[5] = (rnd >> 8) & 0xff;
	}

	if (sc->arge_mac_unit != 0)
		eaddr[5] += sc->arge_mac_unit;

	if (arge_dma_alloc(sc) != 0) {
		error = ENXIO;
		goto fail;
	}

	/* Initialize the MAC block */

	/* Step 1. Soft-reset MAC */
	ARGE_SET_BITS(sc, AR71XX_MAC_CFG1, MAC_CFG1_SOFT_RESET);
	DELAY(20);

	/* Step 2. Punt the MAC core from the central reset register */
	reg = ATH_READ_REG(AR71XX_RST_RESET);
	if (sc->arge_mac_unit == 0)
		reg |= RST_RESET_GE0_MAC;
	else if (sc->arge_mac_unit == 1)
		reg |= RST_RESET_GE1_MAC;
	ATH_WRITE_REG(AR71XX_RST_RESET, reg);
	DELAY(100);
	reg = ATH_READ_REG(AR71XX_RST_RESET);
	if (sc->arge_mac_unit == 0)
		reg &= ~RST_RESET_GE0_MAC;
	else if (sc->arge_mac_unit == 1)
		reg &= ~RST_RESET_GE1_MAC;
	ATH_WRITE_REG(AR71XX_RST_RESET, reg);

	/* Step 3. Reconfigure MAC block */
	ARGE_WRITE(sc, AR71XX_MAC_CFG1,
		MAC_CFG1_SYNC_RX | MAC_CFG1_RX_ENABLE |
		MAC_CFG1_SYNC_TX | MAC_CFG1_TX_ENABLE);

	reg = ARGE_READ(sc, AR71XX_MAC_CFG2);
	reg |= MAC_CFG2_ENABLE_PADCRC | MAC_CFG2_LENGTH_FIELD;
	ARGE_WRITE(sc, AR71XX_MAC_CFG2, reg);

	ARGE_WRITE(sc, AR71XX_MAC_MAX_FRAME_LEN, 1536);

	/* Reset MII bus */
	ARGE_WRITE(sc, AR71XX_MAC_MII_CFG, MAC_MII_CFG_RESET);
	DELAY(100);
	ARGE_WRITE(sc, AR71XX_MAC_MII_CFG, MAC_MII_CFG_CLOCK_DIV_28);
	DELAY(100);

	/*
	 * Program the station address registers from eaddr.
	 */
	ARGE_WRITE(sc, AR71XX_MAC_STA_ADDR1,
	    (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5]);
	ARGE_WRITE(sc, AR71XX_MAC_STA_ADDR2, (eaddr[0] << 8) | eaddr[1]);

	ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG0,
	    FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT);
	ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG1, 0x0fff0000);
	ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG2, 0x00001fff);

	ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMATCH,
	    FIFO_RX_FILTMATCH_DEFAULT);

	ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMASK,
	    FIFO_RX_FILTMASK_DEFAULT);

	/* Do MII setup. */
	if (mii_phy_probe(dev, &sc->arge_miibus,
	    arge_ifmedia_upd, arge_ifmedia_sts)) {
		device_printf(dev, "MII without any phy!\n");
		error = ENXIO;
		goto fail;
	}

	/* Call MI attach routine. */
	ether_ifattach(ifp, eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->arge_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    arge_intr_filter, arge_intr, sc, &sc->arge_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		arge_detach(dev);

	return (error);
}

static int
arge_detach(device_t dev)
{
	struct arge_softc	*sc = device_get_softc(dev);
	struct ifnet		*ifp = sc->arge_ifp;

	KASSERT(mtx_initialized(&sc->arge_mtx), ("arge mutex not initialized"));

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		ARGE_LOCK(sc);
		sc->arge_detach = 1;
#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING)
			ether_poll_deregister(ifp);
#endif

		arge_stop(sc);
		ARGE_UNLOCK(sc);
		taskqueue_drain(taskqueue_swi, &sc->arge_link_task);
		ether_ifdetach(ifp);
	}

	if (sc->arge_miibus)
		device_delete_child(dev, sc->arge_miibus);
	bus_generic_detach(dev);

	if (sc->arge_intrhand)
		bus_teardown_intr(dev, sc->arge_irq, sc->arge_intrhand);

	if (sc->arge_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->arge_rid,
		    sc->arge_res);

	if (ifp)
		if_free(ifp);

	arge_dma_free(sc);

	mtx_destroy(&sc->arge_mtx);

	return (0);

}

static int
arge_suspend(device_t dev)
{

	panic("%s", __func__);
	return 0;
}

static int
arge_resume(device_t dev)
{

	panic("%s", __func__);
	return 0;
}

static int
arge_shutdown(device_t dev)
{
	struct arge_softc	*sc;

	sc = device_get_softc(dev);

	ARGE_LOCK(sc);
	arge_stop(sc);
	ARGE_UNLOCK(sc);

	return (0);
}

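/*
 * Read a PHY register through the MAC's MDIO (MII management) interface.
 */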
static int
arge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct arge_softc * sc = device_get_softc(dev);
	int i, result;
	uint32_t addr = (phy << MAC_MII_PHY_ADDR_SHIFT)
	    | (reg & MAC_MII_REG_MASK);

	if (phy != sc->arge_phy_num)
		return (0);

	ARGE_WRITE(sc, AR71XX_MAC_MII_CMD, MAC_MII_CMD_WRITE);
	ARGE_WRITE(sc, AR71XX_MAC_MII_ADDR, addr);
	ARGE_WRITE(sc, AR71XX_MAC_MII_CMD, MAC_MII_CMD_READ);

	i = ARGE_MII_TIMEOUT;
	while ((ARGE_READ(sc, AR71XX_MAC_MII_INDICATOR) &
	    MAC_MII_INDICATOR_BUSY) && (i--))
		DELAY(5);

	if (i < 0) {
		dprintf("%s timed out\n", __func__);
		/* XXX: return an errno instead? */
		return (-1);
	}

	result = ARGE_READ(sc, AR71XX_MAC_MII_STATUS) & MAC_MII_STATUS_MASK;
	ARGE_WRITE(sc, AR71XX_MAC_MII_CMD, MAC_MII_CMD_WRITE);
	dprintf("%s: phy=%d, reg=%02x, value[%08x]=%04x\n", __func__,
		 phy, reg, addr, result);

	return (result);
}

static int
arge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct arge_softc * sc = device_get_softc(dev);
	int i;
	uint32_t addr =
	    (phy << MAC_MII_PHY_ADDR_SHIFT) | (reg & MAC_MII_REG_MASK);

	dprintf("%s: phy=%d, reg=%02x, value=%04x\n", __func__,
	    phy, reg, data);

	ARGE_WRITE(sc, AR71XX_MAC_MII_ADDR, addr);
	ARGE_WRITE(sc, AR71XX_MAC_MII_CONTROL, data);

	i = ARGE_MII_TIMEOUT;
	while ((ARGE_READ(sc, AR71XX_MAC_MII_INDICATOR) &
	    MAC_MII_INDICATOR_BUSY) && (i--))
		DELAY(5);

	if (i < 0) {
		dprintf("%s timed out\n", __func__);
		/* XXX: return an errno instead? */
		return (-1);
	}

	return (0);
}

static void
arge_miibus_statchg(device_t dev)
{
	struct arge_softc		*sc;

	sc = device_get_softc(dev);
	taskqueue_enqueue(taskqueue_swi, &sc->arge_link_task);
}

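/*
 * Link state change task: program MAC_CFG2, IFCONTROL, the RX filter mask
 * and the Ethernet PLL according to the speed/duplex negotiated by the PHY.
 */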
static void
arge_link_task(void *arg, int pending)
{
	struct arge_softc	*sc;
	struct mii_data		*mii;
	struct ifnet		*ifp;
	uint32_t		media;
	uint32_t		cfg, ifcontrol, rx_filtmask, pll, sec_cfg;

	sc = (struct arge_softc *)arg;

	ARGE_LOCK(sc);
	mii = device_get_softc(sc->arge_miibus);
	ifp = sc->arge_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		ARGE_UNLOCK(sc);
		return;
	}

	if (mii->mii_media_status & IFM_ACTIVE) {

		media = IFM_SUBTYPE(mii->mii_media_active);

		if (media != IFM_NONE) {
			sc->arge_link_status = 1;

			cfg = ARGE_READ(sc, AR71XX_MAC_CFG2);
			cfg &= ~(MAC_CFG2_IFACE_MODE_1000
			    | MAC_CFG2_IFACE_MODE_10_100
			    | MAC_CFG2_FULL_DUPLEX);

			if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
				cfg |= MAC_CFG2_FULL_DUPLEX;

			ifcontrol = ARGE_READ(sc, AR71XX_MAC_IFCONTROL);
			ifcontrol &= ~MAC_IFCONTROL_SPEED;
			rx_filtmask =
			    ARGE_READ(sc, AR71XX_MAC_FIFO_RX_FILTMASK);
			rx_filtmask &= ~FIFO_RX_MASK_BYTE_MODE;

			switch (media) {
			case IFM_10_T:
				cfg |= MAC_CFG2_IFACE_MODE_10_100;
				pll = PLL_ETH_INT_CLK_10;
				break;
			case IFM_100_TX:
				cfg |= MAC_CFG2_IFACE_MODE_10_100;
				ifcontrol |= MAC_IFCONTROL_SPEED;
				pll = PLL_ETH_INT_CLK_100;
				break;
			case IFM_1000_T:
			case IFM_1000_SX:
				cfg |= MAC_CFG2_IFACE_MODE_1000;
				rx_filtmask |= FIFO_RX_MASK_BYTE_MODE;
				pll = PLL_ETH_INT_CLK_1000;
				break;
			default:
				pll = PLL_ETH_INT_CLK_100;
				device_printf(sc->arge_dev,
				    "Unknown media %d\n", media);
			}

			ARGE_WRITE(sc, AR71XX_MAC_FIFO_TX_THRESHOLD,
			    0x008001ff);

			ARGE_WRITE(sc, AR71XX_MAC_CFG2, cfg);
			ARGE_WRITE(sc, AR71XX_MAC_IFCONTROL, ifcontrol);
			ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMASK,
			    rx_filtmask);

			/* set PLL registers */
			sec_cfg = ATH_READ_REG(AR71XX_PLL_CPU_CONFIG);
			sec_cfg &= ~(3 << sc->arge_pll_reg_shift);
			sec_cfg |= (2 << sc->arge_pll_reg_shift);

			ATH_WRITE_REG(AR71XX_PLL_SEC_CONFIG, sec_cfg);
			DELAY(100);

			ATH_WRITE_REG(sc->arge_pll_reg, pll);

			sec_cfg |= (3 << sc->arge_pll_reg_shift);
			ATH_WRITE_REG(AR71XX_PLL_SEC_CONFIG, sec_cfg);
			DELAY(100);

			sec_cfg &= ~(3 << sc->arge_pll_reg_shift);
			ATH_WRITE_REG(AR71XX_PLL_SEC_CONFIG, sec_cfg);
			DELAY(100);
		}
	} else
		sc->arge_link_status = 0;

	ARGE_UNLOCK(sc);
}

static void
arge_reset_dma(struct arge_softc *sc)
{
	ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, 0);
	ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, 0);

	ARGE_WRITE(sc, AR71XX_DMA_RX_DESC, 0);
	ARGE_WRITE(sc, AR71XX_DMA_TX_DESC, 0);

	/* Clear all possible RX interrupts */
	while (ARGE_READ(sc, AR71XX_DMA_RX_STATUS) & DMA_RX_STATUS_PKT_RECVD)
		ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_PKT_RECVD);

	/*
	 * Clear all possible TX interrupts
	 */
	while (ARGE_READ(sc, AR71XX_DMA_TX_STATUS) & DMA_TX_STATUS_PKT_SENT)
		ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_PKT_SENT);

	/*
	 * Now Rx/Tx errors
	 */
	ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS,
	    DMA_RX_STATUS_BUS_ERROR | DMA_RX_STATUS_OVERFLOW);
	ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS,
	    DMA_TX_STATUS_BUS_ERROR | DMA_TX_STATUS_UNDERRUN);
}


static void
arge_init(void *xsc)
{
	struct arge_softc	 *sc = xsc;

	ARGE_LOCK(sc);
	arge_init_locked(sc);
	ARGE_UNLOCK(sc);
}

static void
arge_init_locked(struct arge_softc *sc)
{
	struct ifnet		*ifp = sc->arge_ifp;
	struct mii_data		*mii;

	ARGE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->arge_miibus);

	arge_stop(sc);

	/* Init circular RX list. */
	if (arge_rx_ring_init(sc) != 0) {
		device_printf(sc->arge_dev,
		    "initialization failed: no memory for rx buffers\n");
		arge_stop(sc);
		return;
	}

	/* Init tx descriptors. */
	arge_tx_ring_init(sc);

	arge_reset_dma(sc);

	sc->arge_link_status = 0;
	mii_mediachg(mii);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&sc->arge_stat_callout, hz, arge_tick, sc);

	ARGE_WRITE(sc, AR71XX_DMA_TX_DESC, ARGE_TX_RING_ADDR(sc, 0));
	ARGE_WRITE(sc, AR71XX_DMA_RX_DESC, ARGE_RX_RING_ADDR(sc, 0));

	/* Start listening */
	ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, DMA_RX_CONTROL_EN);

	/* Enable interrupts */
	ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
arge_encap(struct arge_softc *sc, struct mbuf **m_head)
{
	struct arge_txdesc	*txd;
	struct arge_desc	*desc, *prev_desc;
	bus_dma_segment_t	txsegs[ARGE_MAXFRAGS];
	int			error, i, nsegs, prod, prev_prod;
	struct mbuf		*m;

	ARGE_LOCK_ASSERT(sc);

	/*
	 * Fix up the mbuf chain: all fragments should be aligned on a
	 * 4-byte boundary and a multiple of 4 bytes in length.
	 */
	m = *m_head;
	if ((mtod(m, intptr_t) & 3) != 0) {
		m = m_defrag(*m_head, M_DONTWAIT);
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
	}

	prod = sc->arge_cdata.arge_tx_prod;
	txd = &sc->arge_cdata.arge_txdesc[prod];
	error = bus_dmamap_load_mbuf_sg(sc->arge_cdata.arge_tx_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);

	if (error == EFBIG) {
		panic("EFBIG");
	} else if (error != 0)
		return (error);

	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check number of available descriptors. */
	if (sc->arge_cdata.arge_tx_cnt + nsegs >= (ARGE_TX_RING_COUNT - 1)) {
		bus_dmamap_unload(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}

	txd->tx_m = *m_head;
	bus_dmamap_sync(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Make a list of descriptors for this packet. The DMA controller
	 * will walk through it while arge_link is not zero.
	 */
	prev_prod = prod;
	desc = prev_desc = NULL;
	for (i = 0; i < nsegs; i++) {
		desc = &sc->arge_rdata.arge_tx_ring[prod];
		desc->packet_ctrl = ARGE_DMASIZE(txsegs[i].ds_len);

		if (txsegs[i].ds_addr & 3)
			panic("TX packet address unaligned\n");

		desc->packet_addr = txsegs[i].ds_addr;

		/* link with previous descriptor */
		if (prev_desc)
			prev_desc->packet_ctrl |= ARGE_DESC_MORE;

		sc->arge_cdata.arge_tx_cnt++;
		prev_desc = desc;
		ARGE_INC(prod, ARGE_TX_RING_COUNT);
	}

	/* Update producer index. */
	sc->arge_cdata.arge_tx_prod = prod;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
	    sc->arge_cdata.arge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Start transmitting */
	ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, DMA_TX_CONTROL_EN);
	return (0);
}

static void
arge_start(struct ifnet *ifp)
{
	struct arge_softc	 *sc;

	sc = ifp->if_softc;

	ARGE_LOCK(sc);
	arge_start_locked(ifp);
	ARGE_UNLOCK(sc);
}

static void
arge_start_locked(struct ifnet *ifp)
{
	struct arge_softc	*sc;
	struct mbuf		*m_head;
	int			enq;

	sc = ifp->if_softc;

	ARGE_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || sc->arge_link_status == 0)
		return;

	arge_flush_ddr(sc);

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc->arge_cdata.arge_tx_cnt < ARGE_TX_RING_COUNT - 2; ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring.
		 */
		if (arge_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}
}

static void
arge_stop(struct arge_softc *sc)
{
	struct ifnet	    *ifp;

	ARGE_LOCK_ASSERT(sc);

	ifp = sc->arge_ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	callout_stop(&sc->arge_stat_callout);

	/* mask out interrupts */
	ARGE_WRITE(sc, AR71XX_DMA_INTR, 0);

	arge_reset_dma(sc);
}


static int
arge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct arge_softc	*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii;
	int			error;
#ifdef DEVICE_POLLING
	int			mask;
#endif

	switch (command) {
	case SIOCSIFFLAGS:
		ARGE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if (((ifp->if_flags ^ sc->arge_if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
					/* XXX: handle promisc & multi flags */
				}

			} else {
				if (!sc->arge_detach)
					arge_init_locked(sc);
			}
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			arge_stop(sc);
		}
		sc->arge_if_flags = ifp->if_flags;
		ARGE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* XXX: implement SIOCDELMULTI */
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->arge_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		/* XXX: Check other capabilities */
#ifdef DEVICE_POLLING
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				ARGE_WRITE(sc, AR71XX_DMA_INTR, 0);
				error = ether_poll_register(arge_poll, ifp);
				if (error)
					return error;
				ARGE_LOCK(sc);
				ifp->if_capenable |= IFCAP_POLLING;
				ARGE_UNLOCK(sc);
			} else {
				ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL);
				error = ether_poll_deregister(ifp);
				ARGE_LOCK(sc);
				ifp->if_capenable &= ~IFCAP_POLLING;
				ARGE_UNLOCK(sc);
			}
		}
		error = 0;
		break;
#endif
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

/*
 * Set media options.
 */
static int
arge_ifmedia_upd(struct ifnet *ifp)
{
	struct arge_softc	*sc;
	struct mii_data		*mii;
	struct mii_softc	*miisc;
	int			error;

	sc = ifp->if_softc;
	ARGE_LOCK(sc);
	mii = device_get_softc(sc->arge_miibus);
	if (mii->mii_instance) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);
	ARGE_UNLOCK(sc);

	return (error);
}

/*
 * Report current media status.
 */
static void
arge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct arge_softc	*sc = ifp->if_softc;
	struct mii_data		*mii;

	mii = device_get_softc(sc->arge_miibus);
	ARGE_LOCK(sc);
	mii_pollstat(mii);
	ARGE_UNLOCK(sc);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

struct arge_dmamap_arg {
	bus_addr_t	arge_busaddr;
};

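/*
 * bus_dmamap_load() callback: record the bus address of the mapped segment.
 */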
static void
arge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct arge_dmamap_arg	*ctx;

	if (error != 0)
		return;
	ctx = arg;
	ctx->arge_busaddr = segs[0].ds_addr;
}

static int
arge_dma_alloc(struct arge_softc *sc)
{
	struct arge_dmamap_arg	ctx;
	struct arge_txdesc	*txd;
	struct arge_rxdesc	*rxd;
	int			error, i;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->arge_dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_parent_tag);
	if (error != 0) {
		device_printf(sc->arge_dev, "failed to create parent DMA tag\n");
		goto fail;
	}
	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(
	    sc->arge_cdata.arge_parent_tag,	/* parent */
	    ARGE_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ARGE_TX_DMA_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    ARGE_TX_DMA_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->arge_dev, "failed to create Tx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(
	    sc->arge_cdata.arge_parent_tag,	/* parent */
	    ARGE_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ARGE_RX_DMA_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    ARGE_RX_DMA_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->arge_dev, "failed to create Rx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(
	    sc->arge_cdata.arge_parent_tag,	/* parent */
	    sizeof(uint32_t), 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * ARGE_MAXFRAGS,	/* maxsize */
	    ARGE_MAXFRAGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_tx_tag);
	if (error != 0) {
		device_printf(sc->arge_dev, "failed to create Tx DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(
	    sc->arge_cdata.arge_parent_tag,	/* parent */
	    ARGE_RX_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    ARGE_MAXFRAGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->arge_cdata.arge_rx_tag);
	if (error != 0) {
		device_printf(sc->arge_dev, "failed to create Rx DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->arge_cdata.arge_tx_ring_tag,
	    (void **)&sc->arge_rdata.arge_tx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->arge_cdata.arge_tx_ring_map);
	if (error != 0) {
		device_printf(sc->arge_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.arge_busaddr = 0;
	error = bus_dmamap_load(sc->arge_cdata.arge_tx_ring_tag,
	    sc->arge_cdata.arge_tx_ring_map, sc->arge_rdata.arge_tx_ring,
	    ARGE_TX_DMA_SIZE, arge_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.arge_busaddr == 0) {
		device_printf(sc->arge_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->arge_rdata.arge_tx_ring_paddr = ctx.arge_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->arge_cdata.arge_rx_ring_tag,
	    (void **)&sc->arge_rdata.arge_rx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->arge_cdata.arge_rx_ring_map);
	if (error != 0) {
		device_printf(sc->arge_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.arge_busaddr = 0;
	error = bus_dmamap_load(sc->arge_cdata.arge_rx_ring_tag,
	    sc->arge_cdata.arge_rx_ring_map, sc->arge_rdata.arge_rx_ring,
	    ARGE_RX_DMA_SIZE, arge_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.arge_busaddr == 0) {
		device_printf(sc->arge_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->arge_rdata.arge_rx_ring_paddr = ctx.arge_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
		txd = &sc->arge_cdata.arge_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->arge_cdata.arge_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->arge_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->arge_cdata.arge_rx_tag, 0,
	    &sc->arge_cdata.arge_rx_sparemap)) != 0) {
		device_printf(sc->arge_dev,
		    "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
		rxd = &sc->arge_cdata.arge_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->arge_cdata.arge_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->arge_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
arge_dma_free(struct arge_softc *sc)
{
	struct arge_txdesc	*txd;
	struct arge_rxdesc	*rxd;
	int			i;

	/* Tx ring. */
	if (sc->arge_cdata.arge_tx_ring_tag) {
		if (sc->arge_cdata.arge_tx_ring_map)
			bus_dmamap_unload(sc->arge_cdata.arge_tx_ring_tag,
			    sc->arge_cdata.arge_tx_ring_map);
		if (sc->arge_cdata.arge_tx_ring_map &&
		    sc->arge_rdata.arge_tx_ring)
			bus_dmamem_free(sc->arge_cdata.arge_tx_ring_tag,
			    sc->arge_rdata.arge_tx_ring,
			    sc->arge_cdata.arge_tx_ring_map);
		sc->arge_rdata.arge_tx_ring = NULL;
		sc->arge_cdata.arge_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->arge_cdata.arge_tx_ring_tag);
		sc->arge_cdata.arge_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->arge_cdata.arge_rx_ring_tag) {
		if (sc->arge_cdata.arge_rx_ring_map)
			bus_dmamap_unload(sc->arge_cdata.arge_rx_ring_tag,
			    sc->arge_cdata.arge_rx_ring_map);
		if (sc->arge_cdata.arge_rx_ring_map &&
		    sc->arge_rdata.arge_rx_ring)
			bus_dmamem_free(sc->arge_cdata.arge_rx_ring_tag,
			    sc->arge_rdata.arge_rx_ring,
			    sc->arge_cdata.arge_rx_ring_map);
		sc->arge_rdata.arge_rx_ring = NULL;
		sc->arge_cdata.arge_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc->arge_cdata.arge_rx_ring_tag);
		sc->arge_cdata.arge_rx_ring_tag = NULL;
	}
	/* Tx buffers. */
	if (sc->arge_cdata.arge_tx_tag) {
		for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
			txd = &sc->arge_cdata.arge_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc->arge_cdata.arge_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->arge_cdata.arge_tx_tag);
		sc->arge_cdata.arge_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (sc->arge_cdata.arge_rx_tag) {
		for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
			rxd = &sc->arge_cdata.arge_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc->arge_cdata.arge_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->arge_cdata.arge_rx_sparemap) {
			bus_dmamap_destroy(sc->arge_cdata.arge_rx_tag,
			    sc->arge_cdata.arge_rx_sparemap);
			sc->arge_cdata.arge_rx_sparemap = 0;
		}
		bus_dma_tag_destroy(sc->arge_cdata.arge_rx_tag);
		sc->arge_cdata.arge_rx_tag = NULL;
	}

	if (sc->arge_cdata.arge_parent_tag) {
		bus_dma_tag_destroy(sc->arge_cdata.arge_parent_tag);
		sc->arge_cdata.arge_parent_tag = NULL;
	}
}

/*
 * Initialize the transmit descriptors.
 */
static int
arge_tx_ring_init(struct arge_softc *sc)
{
	struct arge_ring_data	*rd;
	struct arge_txdesc	*txd;
	bus_addr_t		addr;
	int			i;

	sc->arge_cdata.arge_tx_prod = 0;
	sc->arge_cdata.arge_tx_cons = 0;
	sc->arge_cdata.arge_tx_cnt = 0;
	sc->arge_cdata.arge_tx_pkts = 0;

	rd = &sc->arge_rdata;
	bzero(rd->arge_tx_ring, sizeof(rd->arge_tx_ring));
	for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
		if (i == ARGE_TX_RING_COUNT - 1)
			addr = ARGE_TX_RING_ADDR(sc, 0);
		else
			addr = ARGE_TX_RING_ADDR(sc, i + 1);
		rd->arge_tx_ring[i].packet_ctrl = ARGE_DESC_EMPTY;
		rd->arge_tx_ring[i].next_desc = addr;
		txd = &sc->arge_cdata.arge_txdesc[i];
		txd->tx_m = NULL;
	}

	bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
	    sc->arge_cdata.arge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
arge_rx_ring_init(struct arge_softc *sc)
{
	struct arge_ring_data	*rd;
	struct arge_rxdesc	*rxd;
	bus_addr_t		addr;
	int			i;

	sc->arge_cdata.arge_rx_cons = 0;

	rd = &sc->arge_rdata;
	bzero(rd->arge_rx_ring, sizeof(rd->arge_rx_ring));
	for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
		rxd = &sc->arge_cdata.arge_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->desc = &rd->arge_rx_ring[i];
		if (i == ARGE_RX_RING_COUNT - 1)
			addr = ARGE_RX_RING_ADDR(sc, 0);
		else
			addr = ARGE_RX_RING_ADDR(sc, i + 1);
		rd->arge_rx_ring[i].next_desc = addr;
		if (arge_newbuf(sc, i) != 0) {
			return (ENOBUFS);
		}
	}

	bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
	    sc->arge_cdata.arge_rx_ring_map,
	    BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
static int
arge_newbuf(struct arge_softc *sc, int idx)
{
	struct arge_desc		*desc;
	struct arge_rxdesc	*rxd;
	struct mbuf		*m;
	bus_dma_segment_t	segs[1];
	bus_dmamap_t		map;
	int			nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, sizeof(uint64_t));

	if (bus_dmamap_load_mbuf_sg(sc->arge_cdata.arge_rx_tag,
	    sc->arge_cdata.arge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->arge_cdata.arge_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_unload(sc->arge_cdata.arge_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->arge_cdata.arge_rx_sparemap;
	sc->arge_cdata.arge_rx_sparemap = map;
	rxd->rx_m = m;
	desc = rxd->desc;
	if (segs[0].ds_addr & 3)
		panic("RX packet address unaligned");
	desc->packet_addr = segs[0].ds_addr;
	desc->packet_ctrl = ARGE_DESC_EMPTY | ARGE_DMASIZE(segs[0].ds_len);

	bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
	    sc->arge_cdata.arge_rx_ring_map,
	    BUS_DMASYNC_PREWRITE);

	return (0);
}

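/*
 * The RX DMA engine requires 4-byte aligned buffer addresses, so the usual
 * 2-byte RX offset cannot be applied before reception. Instead, shift the
 * received frame back by ETHER_ALIGN (2) bytes, one 16-bit word at a time,
 * so that the IP header ends up 32-bit aligned.
 */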
static __inline void
arge_fixup_rx(struct mbuf *m)
{
	int		i;
	uint16_t	*src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < m->m_len / sizeof(uint16_t); i++) {
		*dst++ = *src++;
	}

	if (m->m_len % sizeof(uint16_t))
		*(uint8_t *)dst = *(uint8_t *)src;

	m->m_data -= ETHER_ALIGN;
}

#ifdef DEVICE_POLLING
static int
arge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct arge_softc *sc = ifp->if_softc;
	int rx_npkts = 0;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		ARGE_LOCK(sc);
		arge_tx_locked(sc);
		rx_npkts = arge_rx_locked(sc);
		ARGE_UNLOCK(sc);
	}

	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

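/*
 * Reclaim TX descriptors that the DMA engine has finished with and free
 * the transmitted mbufs.
 */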
static void
arge_tx_locked(struct arge_softc *sc)
{
	struct arge_txdesc	*txd;
	struct arge_desc	*cur_tx;
	struct ifnet		*ifp;
	uint32_t		ctrl;
	int			cons, prod;

	ARGE_LOCK_ASSERT(sc);

	cons = sc->arge_cdata.arge_tx_cons;
	prod = sc->arge_cdata.arge_tx_prod;
	if (cons == prod)
		return;

	bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
	    sc->arge_cdata.arge_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	ifp = sc->arge_ifp;
	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (; cons != prod; ARGE_INC(cons, ARGE_TX_RING_COUNT)) {
		cur_tx = &sc->arge_rdata.arge_tx_ring[cons];
		ctrl = cur_tx->packet_ctrl;
		/* Check if descriptor has "finished" flag */
		if ((ctrl & ARGE_DESC_EMPTY) == 0)
			break;

		ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_PKT_SENT);

		sc->arge_cdata.arge_tx_cnt--;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		txd = &sc->arge_cdata.arge_txdesc[cons];

		ifp->if_opackets++;

		bus_dmamap_sync(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap);

		/* Free only if it's first descriptor in list */
		if (txd->tx_m)
			m_freem(txd->tx_m);
		txd->tx_m = NULL;

		/* reset descriptor */
		cur_tx->packet_addr = 0;
	}

	sc->arge_cdata.arge_tx_cons = cons;

	bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
	    sc->arge_cdata.arge_tx_ring_map, BUS_DMASYNC_PREWRITE);
}

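/*
 * Drain received frames from the RX ring, hand them to the network stack
 * and refill the ring with fresh mbuf clusters.
 */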
static int
arge_rx_locked(struct arge_softc *sc)
{
	struct arge_rxdesc	*rxd;
	struct ifnet		*ifp = sc->arge_ifp;
	int			cons, prog, packet_len, i;
	struct arge_desc	*cur_rx;
	struct mbuf		*m;
	int			rx_npkts = 0;

	ARGE_LOCK_ASSERT(sc);

	cons = sc->arge_cdata.arge_rx_cons;

	bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
	    sc->arge_cdata.arge_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (prog = 0; prog < ARGE_RX_RING_COUNT;
	    ARGE_INC(cons, ARGE_RX_RING_COUNT)) {
		cur_rx = &sc->arge_rdata.arge_rx_ring[cons];
		rxd = &sc->arge_cdata.arge_rxdesc[cons];
		m = rxd->rx_m;

		if ((cur_rx->packet_ctrl & ARGE_DESC_EMPTY) != 0)
			break;

		ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_PKT_RECVD);

		prog++;

		packet_len = ARGE_DMASIZE(cur_rx->packet_ctrl);
		bus_dmamap_sync(sc->arge_cdata.arge_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		m = rxd->rx_m;

		arge_fixup_rx(m);
		m->m_pkthdr.rcvif = ifp;
		/* Skip 4 bytes of CRC */
		m->m_pkthdr.len = m->m_len = packet_len - ETHER_CRC_LEN;
		ifp->if_ipackets++;
		rx_npkts++;

		ARGE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		ARGE_LOCK(sc);
		cur_rx->packet_addr = 0;
	}

	if (prog > 0) {

		i = sc->arge_cdata.arge_rx_cons;
		for (; prog > 0; prog--) {
			if (arge_newbuf(sc, i) != 0) {
				device_printf(sc->arge_dev,
				    "Failed to allocate buffer\n");
				break;
			}
			ARGE_INC(i, ARGE_RX_RING_COUNT);
		}

		bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
		    sc->arge_cdata.arge_rx_ring_map,
		    BUS_DMASYNC_PREWRITE);

		sc->arge_cdata.arge_rx_cons = cons;
	}

	return (rx_npkts);
}

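/*
 * Fast interrupt filter: latch the DMA interrupt status, mask further DMA
 * interrupts and defer the actual processing to the ithread (arge_intr).
 */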
static int
arge_intr_filter(void *arg)
{
	struct arge_softc	*sc = arg;
	uint32_t		status, ints;

	status = ARGE_READ(sc, AR71XX_DMA_INTR_STATUS);
	ints = ARGE_READ(sc, AR71XX_DMA_INTR);

#if 0
	dprintf("int mask(filter) = %b\n", ints,
	    "\20\10RX_BUS_ERROR\7RX_OVERFLOW\5RX_PKT_RCVD"
	    "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT");
	dprintf("status(filter) = %b\n", status,
	    "\20\10RX_BUS_ERROR\7RX_OVERFLOW\5RX_PKT_RCVD"
	    "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT");
#endif

	if (status & DMA_INTR_ALL) {
		sc->arge_intr_status |= status;
		ARGE_WRITE(sc, AR71XX_DMA_INTR, 0);
		return (FILTER_SCHEDULE_THREAD);
	}

	sc->arge_intr_status = 0;
	return (FILTER_STRAY);
}

static void
arge_intr(void *arg)
{
	struct arge_softc	*sc = arg;
	uint32_t		status;

	status = ARGE_READ(sc, AR71XX_DMA_INTR_STATUS);
	status |= sc->arge_intr_status;

#if 0
	dprintf("int status(intr) = %b\n", status,
	    "\20\10\7RX_OVERFLOW\5RX_PKT_RCVD"
	    "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT");
#endif

	/*
	 * Is it our interrupt at all?
	 */
	if (status == 0)
		return;

	if (status & DMA_INTR_RX_BUS_ERROR) {
		ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_BUS_ERROR);
		device_printf(sc->arge_dev, "RX bus error");
		return;
	}

	if (status & DMA_INTR_TX_BUS_ERROR) {
		ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_BUS_ERROR);
		device_printf(sc->arge_dev, "TX bus error");
		return;
	}

	ARGE_LOCK(sc);

	if (status & DMA_INTR_RX_PKT_RCVD)
		arge_rx_locked(sc);

	/*
	 * RX overrun disables the receiver.
	 * Clear indication and re-enable rx.
	 */
	if (status & DMA_INTR_RX_OVERFLOW) {
		ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_OVERFLOW);
		ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, DMA_RX_CONTROL_EN);
	}

	if (status & DMA_INTR_TX_PKT_SENT)
		arge_tx_locked(sc);
	/*
	 * Underrun turns off TX. Clear underrun indication.
	 * If there's anything left in the ring, reactivate the tx.
	 */
	if (status & DMA_INTR_TX_UNDERRUN) {
		ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_UNDERRUN);
		if (sc->arge_cdata.arge_tx_pkts > 0) {
			ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL,
			    DMA_TX_CONTROL_EN);
		}
	}

	/*
	 * We handled all bits, clear status
	 */
	sc->arge_intr_status = 0;
	ARGE_UNLOCK(sc);
	/*
	 * re-enable all interrupts
	 */
	ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL);
}


static void
arge_tick(void *xsc)
{
	struct arge_softc	*sc = xsc;
	struct mii_data		*mii;

	ARGE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->arge_miibus);
	mii_tick(mii);
	callout_reset(&sc->arge_stat_callout, hz, arge_tick, sc);
}
1681