/* if_npe.c -- FreeBSD head revision 169954 */
/*-
 * Copyright (c) 2006 Sam Leffler.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
24164426Ssam
25164426Ssam#include <sys/cdefs.h>
26164426Ssam__FBSDID("$FreeBSD: head/sys/arm/xscale/ixp425/if_npe.c 169954 2007-05-24 16:31:22Z sam $");
27164426Ssam
/*
 * Intel XScale NPE Ethernet driver.
 *
 * This driver handles the two ports present on the IXP425.
 * Packet processing is done by the Network Processing Engines
 * (NPE's) that work together with a MAC and PHY. The MAC
 * is also mapped to the XScale cpu; the PHY is accessed via
 * the MAC. NPE-XScale communication happens through h/w
 * queues managed by the Q Manager block.
 *
 * The code here replaces the ethAcc, ethMii, and ethDB classes
 * in the Intel Access Library (IAL) and the OS-specific driver.
 *
 * XXX add vlan support
 * XXX NPE-C port doesn't work yet
 */
44164426Ssam#ifdef HAVE_KERNEL_OPTION_HEADERS
45164426Ssam#include "opt_device_polling.h"
46164426Ssam#endif
47164426Ssam
48164426Ssam#include <sys/param.h>
49164426Ssam#include <sys/systm.h>
50164426Ssam#include <sys/bus.h>
51164426Ssam#include <sys/kernel.h>
52164426Ssam#include <sys/mbuf.h>
53164426Ssam#include <sys/malloc.h>
54164426Ssam#include <sys/module.h>
55164426Ssam#include <sys/rman.h>
56164426Ssam#include <sys/socket.h>
57164426Ssam#include <sys/sockio.h>
58164426Ssam#include <sys/sysctl.h>
59164426Ssam#include <sys/endian.h>
60164426Ssam#include <machine/bus.h>
61164426Ssam
62164426Ssam#include <net/ethernet.h>
63164426Ssam#include <net/if.h>
64164426Ssam#include <net/if_arp.h>
65164426Ssam#include <net/if_dl.h>
66164426Ssam#include <net/if_media.h>
67164426Ssam#include <net/if_mib.h>
68164426Ssam#include <net/if_types.h>
69164426Ssam
70164426Ssam#ifdef INET
71164426Ssam#include <netinet/in.h>
72164426Ssam#include <netinet/in_systm.h>
73164426Ssam#include <netinet/in_var.h>
74164426Ssam#include <netinet/ip.h>
75164426Ssam#endif
76164426Ssam
77164426Ssam#include <net/bpf.h>
78164426Ssam#include <net/bpfdesc.h>
79164426Ssam
80164426Ssam#include <arm/xscale/ixp425/ixp425reg.h>
81164426Ssam#include <arm/xscale/ixp425/ixp425var.h>
82164426Ssam#include <arm/xscale/ixp425/ixp425_qmgr.h>
83164426Ssam#include <arm/xscale/ixp425/ixp425_npevar.h>
84164426Ssam
85164426Ssam#include <dev/mii/mii.h>
86164426Ssam#include <dev/mii/miivar.h>
87164426Ssam#include <arm/xscale/ixp425/if_npereg.h>
88164426Ssam
89164426Ssam#include "miibus_if.h"
90164426Ssam
/*
 * XXX: For the main bus dma tag. Can go away if the new method to get the
 * dma tag from the parent got MFC'd into RELENG_6.
 */
extern struct ixp425_softc *ixp425_softc;
96166064Scognet
97164426Ssamstruct npebuf {
98164426Ssam	struct npebuf	*ix_next;	/* chain to next buffer */
99164426Ssam	void		*ix_m;		/* backpointer to mbuf */
100164426Ssam	bus_dmamap_t	ix_map;		/* bus dma map for associated data */
101164426Ssam	struct npehwbuf	*ix_hw;		/* associated h/w block */
102164426Ssam	uint32_t	ix_neaddr;	/* phys address of ix_hw */
103164426Ssam};
104164426Ssam
105164426Ssamstruct npedma {
106164426Ssam	const char*	name;
107164426Ssam	int		nbuf;		/* # npebuf's allocated */
108164426Ssam	bus_dma_tag_t	mtag;		/* bus dma tag for mbuf data */
109164426Ssam	struct npehwbuf	*hwbuf;		/* NPE h/w buffers */
110164426Ssam	bus_dma_tag_t	buf_tag;	/* tag+map for NPE buffers */
111164426Ssam	bus_dmamap_t	buf_map;
112164426Ssam	bus_addr_t	buf_phys;	/* phys addr of buffers */
113164426Ssam	struct npebuf	*buf;		/* s/w buffers (1-1 w/ h/w) */
114164426Ssam};
115164426Ssam
116164426Ssamstruct npe_softc {
117164426Ssam	/* XXX mii requires this be first; do not move! */
118164426Ssam	struct ifnet	*sc_ifp;	/* ifnet pointer */
119164426Ssam	struct mtx	sc_mtx;		/* basically a perimeter lock */
120164426Ssam	device_t	sc_dev;
121164426Ssam	bus_space_tag_t	sc_iot;
122164426Ssam	bus_space_handle_t sc_ioh;	/* MAC register window */
123164426Ssam	device_t	sc_mii;		/* child miibus */
124164426Ssam	bus_space_handle_t sc_miih;	/* MII register window */
125164426Ssam	struct ixpnpe_softc *sc_npe;	/* NPE support */
126164426Ssam	int		sc_debug;	/* DPRINTF* control */
127164426Ssam	int		sc_tickinterval;
128164426Ssam	struct callout	tick_ch;	/* Tick callout */
129166339Skevlo	int		npe_watchdog_timer;
130164426Ssam	struct npedma	txdma;
131164426Ssam	struct npebuf	*tx_free;	/* list of free tx buffers */
132164426Ssam	struct npedma	rxdma;
133164426Ssam	bus_addr_t	buf_phys;	/* XXX for returning a value */
134164426Ssam	int		rx_qid;		/* rx qid */
135164426Ssam	int		rx_freeqid;	/* rx free buffers qid */
136164426Ssam	int		tx_qid;		/* tx qid */
137164426Ssam	int		tx_doneqid;	/* tx completed qid */
138164426Ssam	struct ifmib_iso_8802_3 mibdata;
139164426Ssam	bus_dma_tag_t	sc_stats_tag;	/* bus dma tag for stats block */
140164426Ssam	struct npestats	*sc_stats;
141164426Ssam	bus_dmamap_t	sc_stats_map;
142164426Ssam	bus_addr_t	sc_stats_phys;	/* phys addr of sc_stats */
143164426Ssam};
144164426Ssam
145164426Ssam/*
146164426Ssam * Per-unit static configuration for IXP425.  The tx and
147164426Ssam * rx free Q id's are fixed by the NPE microcode.  The
148164426Ssam * rx Q id's are programmed to be separate to simplify
149164426Ssam * multi-port processing.  It may be better to handle
150164426Ssam * all traffic through one Q (as done by the Intel drivers).
151164426Ssam *
152164426Ssam * Note that the PHY's are accessible only from MAC A
153164426Ssam * on the IXP425.  This and other platform-specific
154164426Ssam * assumptions probably need to be handled through hints.
155164426Ssam */
156164426Ssamstatic const struct {
157164426Ssam	const char	*desc;		/* device description */
158164426Ssam	int		npeid;		/* NPE assignment */
159164426Ssam	uint32_t	imageid;	/* NPE firmware image id */
160164426Ssam	uint32_t	regbase;
161164426Ssam	int		regsize;
162164426Ssam	uint32_t	miibase;
163164426Ssam	int		miisize;
164164426Ssam	uint8_t		rx_qid;
165164426Ssam	uint8_t		rx_freeqid;
166164426Ssam	uint8_t		tx_qid;
167164426Ssam	uint8_t		tx_doneqid;
168164426Ssam} npeconfig[NPE_PORTS_MAX] = {
169164426Ssam	{ .desc		= "IXP NPE-B",
170164426Ssam	  .npeid	= NPE_B,
171164426Ssam	  .imageid	= IXP425_NPE_B_IMAGEID,
172164426Ssam	  .regbase	= IXP425_MAC_A_HWBASE,
173164426Ssam	  .regsize	= IXP425_MAC_A_SIZE,
174164426Ssam	  .miibase	= IXP425_MAC_A_HWBASE,
175164426Ssam	  .miisize	= IXP425_MAC_A_SIZE,
176164426Ssam	  .rx_qid	= 4,
177164426Ssam	  .rx_freeqid	= 27,
178164426Ssam	  .tx_qid	= 24,
179164426Ssam	  .tx_doneqid	= 31
180164426Ssam	},
181164426Ssam	{ .desc		= "IXP NPE-C",
182164426Ssam	  .npeid	= NPE_C,
183164426Ssam	  .imageid	= IXP425_NPE_C_IMAGEID,
184164426Ssam	  .regbase	= IXP425_MAC_B_HWBASE,
185164426Ssam	  .regsize	= IXP425_MAC_B_SIZE,
186164426Ssam	  .miibase	= IXP425_MAC_A_HWBASE,
187164426Ssam	  .miisize	= IXP425_MAC_A_SIZE,
188164426Ssam	  .rx_qid	= 12,
189164426Ssam	  .rx_freeqid	= 28,
190164426Ssam	  .tx_qid	= 25,
191164426Ssam	  .tx_doneqid	= 31
192164426Ssam	},
193164426Ssam};
194164426Ssamstatic struct npe_softc *npes[NPE_MAX];	/* NB: indexed by npeid */
195164426Ssam
196164426Ssamstatic __inline uint32_t
197164426SsamRD4(struct npe_softc *sc, bus_size_t off)
198164426Ssam{
199164426Ssam	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
200164426Ssam}
201164426Ssam
202164426Ssamstatic __inline void
203164426SsamWR4(struct npe_softc *sc, bus_size_t off, uint32_t val)
204164426Ssam{
205164426Ssam	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
206164426Ssam}
207164426Ssam
/*
 * Softc lock helpers.  NB: no trailing ';' in the macro bodies --
 * the invocation site supplies it (a trailing one here yields a
 * double semicolon that breaks unbraced if/else bodies).
 */
#define	NPE_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define	NPE_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define	NPE_LOCK_INIT(_sc) \
	mtx_init(&(_sc)->sc_mtx, device_get_nameunit((_sc)->sc_dev), \
	    MTX_NETWORK_LOCK, MTX_DEF)
#define	NPE_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->sc_mtx)
#define	NPE_ASSERT_LOCKED(_sc)	mtx_assert(&(_sc)->sc_mtx, MA_OWNED)
#define	NPE_ASSERT_UNLOCKED(_sc) mtx_assert(&(_sc)->sc_mtx, MA_NOTOWNED)
216164426Ssam
217164426Ssamstatic devclass_t npe_devclass;
218164426Ssam
219164426Ssamstatic int	npe_activate(device_t dev);
220164426Ssamstatic void	npe_deactivate(device_t dev);
221164426Ssamstatic int	npe_ifmedia_update(struct ifnet *ifp);
222164426Ssamstatic void	npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr);
223164426Ssamstatic void	npe_setmac(struct npe_softc *sc, u_char *eaddr);
224164426Ssamstatic void	npe_getmac(struct npe_softc *sc, u_char *eaddr);
225164426Ssamstatic void	npe_txdone(int qid, void *arg);
226164426Ssamstatic int	npe_rxbuf_init(struct npe_softc *, struct npebuf *,
227164426Ssam			struct mbuf *);
228164426Ssamstatic void	npe_rxdone(int qid, void *arg);
229164426Ssamstatic void	npeinit(void *);
230164426Ssamstatic void	npestart_locked(struct ifnet *);
231164426Ssamstatic void	npestart(struct ifnet *);
232164426Ssamstatic void	npestop(struct npe_softc *);
233166339Skevlostatic void	npewatchdog(struct npe_softc *);
234164426Ssamstatic int	npeioctl(struct ifnet * ifp, u_long, caddr_t);
235164426Ssam
236164426Ssamstatic int	npe_setrxqosentry(struct npe_softc *, int classix,
237164426Ssam			int trafclass, int qid);
238164426Ssamstatic int	npe_updatestats(struct npe_softc *);
239164426Ssam#if 0
240164426Ssamstatic int	npe_getstats(struct npe_softc *);
241164426Ssamstatic uint32_t	npe_getimageid(struct npe_softc *);
242164426Ssamstatic int	npe_setloopback(struct npe_softc *, int ena);
243164426Ssam#endif
244164426Ssam
245164426Ssam/* NB: all tx done processing goes through one queue */
246164426Ssamstatic int tx_doneqid = -1;
247164426Ssam
248164426SsamSYSCTL_NODE(_hw, OID_AUTO, npe, CTLFLAG_RD, 0, "IXP425 NPE driver parameters");
249164426Ssam
250164426Ssamstatic int npe_debug = 0;
251164426SsamSYSCTL_INT(_hw_npe, OID_AUTO, debug, CTLFLAG_RW, &npe_debug,
252164426Ssam	   0, "IXP425 NPE network interface debug msgs");
253164426SsamTUNABLE_INT("hw.npe.npe", &npe_debug);
254164426Ssam#define	DPRINTF(sc, fmt, ...) do {					\
255164426Ssam	if (sc->sc_debug) device_printf(sc->sc_dev, fmt, __VA_ARGS__);	\
256164426Ssam} while (0)
257164426Ssam#define	DPRINTFn(n, sc, fmt, ...) do {					\
258164426Ssam	if (sc->sc_debug >= n) device_printf(sc->sc_dev, fmt, __VA_ARGS__);\
259164426Ssam} while (0)
260164426Ssamstatic int npe_tickinterval = 3;		/* npe_tick frequency (secs) */
261164426SsamSYSCTL_INT(_hw_npe, OID_AUTO, tickinterval, CTLFLAG_RD, &npe_tickinterval,
262164426Ssam	    0, "periodic work interval (secs)");
263164426SsamTUNABLE_INT("hw.npe.tickinterval", &npe_tickinterval);
264164426Ssam
265164426Ssamstatic	int npe_rxbuf = 64;		/* # rx buffers to allocate */
266164426SsamSYSCTL_INT(_hw_npe, OID_AUTO, rxbuf, CTLFLAG_RD, &npe_rxbuf,
267164426Ssam	    0, "rx buffers allocated");
268164426SsamTUNABLE_INT("hw.npe.rxbuf", &npe_rxbuf);
269164426Ssamstatic	int npe_txbuf = 128;		/* # tx buffers to allocate */
270164426SsamSYSCTL_INT(_hw_npe, OID_AUTO, txbuf, CTLFLAG_RD, &npe_txbuf,
271164426Ssam	    0, "tx buffers allocated");
272164426SsamTUNABLE_INT("hw.npe.txbuf", &npe_txbuf);
273164426Ssam
274164426Ssamstatic int
275164426Ssamnpe_probe(device_t dev)
276164426Ssam{
277164426Ssam	int unit = device_get_unit(dev);
278164426Ssam
279164426Ssam	if (unit >= NPE_PORTS_MAX) {
280164426Ssam		device_printf(dev, "unit %d not supported\n", unit);
281164426Ssam		return EINVAL;
282164426Ssam	}
283164426Ssam	/* XXX check feature register to see if enabled */
284164426Ssam	device_set_desc(dev, npeconfig[unit].desc);
285164426Ssam	return 0;
286164426Ssam}
287164426Ssam
288164426Ssamstatic int
289164426Ssamnpe_attach(device_t dev)
290164426Ssam{
291164426Ssam	struct npe_softc *sc = device_get_softc(dev);
292164426Ssam	struct ixp425_softc *sa = device_get_softc(device_get_parent(dev));
293164426Ssam	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
294164426Ssam	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
295164426Ssam	struct ifnet *ifp = NULL;
296164426Ssam	int error;
297164426Ssam	u_char eaddr[6];
298164426Ssam
299164426Ssam	sc->sc_dev = dev;
300164426Ssam	sc->sc_iot = sa->sc_iot;
301164426Ssam	NPE_LOCK_INIT(sc);
302164426Ssam	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
303164426Ssam	sc->sc_debug = npe_debug;
304164426Ssam	sc->sc_tickinterval = npe_tickinterval;
305164426Ssam
306164426Ssam	sc->sc_npe = ixpnpe_attach(dev);
307164426Ssam	if (sc->sc_npe == NULL) {
308164426Ssam		error = EIO;		/* XXX */
309164426Ssam		goto out;
310164426Ssam	}
311164426Ssam
312164426Ssam	error = npe_activate(dev);
313164426Ssam	if (error)
314164426Ssam		goto out;
315164426Ssam
316164426Ssam	npe_getmac(sc, eaddr);
317164426Ssam
318164426Ssam	/* NB: must be setup prior to invoking mii code */
319164426Ssam	sc->sc_ifp = ifp = if_alloc(IFT_ETHER);
320164426Ssam	if (mii_phy_probe(dev, &sc->sc_mii, npe_ifmedia_update, npe_ifmedia_status)) {
321164426Ssam		device_printf(dev, "Cannot find my PHY.\n");
322164426Ssam		error = ENXIO;
323164426Ssam		goto out;
324164426Ssam	}
325164426Ssam
326164426Ssam	ifp->if_softc = sc;
327164426Ssam	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
328164426Ssam	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
329164426Ssam	ifp->if_start = npestart;
330164426Ssam	ifp->if_ioctl = npeioctl;
331164426Ssam	ifp->if_init = npeinit;
332164426Ssam	IFQ_SET_MAXLEN(&ifp->if_snd, sc->txdma.nbuf - 1);
333166625Smlaier	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
334164426Ssam	IFQ_SET_READY(&ifp->if_snd);
335164426Ssam	ifp->if_linkmib = &sc->mibdata;
336164426Ssam	ifp->if_linkmiblen = sizeof(sc->mibdata);
337164426Ssam	sc->mibdata.dot3Compliance = DOT3COMPLIANCE_STATS;
338164426Ssam#ifdef DEVICE_POLLING
339164426Ssam	ifp->if_capabilities |= IFCAP_POLLING;
340164426Ssam#endif
341164426Ssam
342164426Ssam	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug",
343164426Ssam	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs");
344164426Ssam	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tickinterval",
345164426Ssam	    CTLFLAG_RW, &sc->sc_tickinterval, 0, "periodic work frequency");
346164426Ssam
347164426Ssam	ether_ifattach(ifp, eaddr);
348164426Ssam	return 0;
349164426Ssamout:
350164426Ssam	npe_deactivate(dev);
351164426Ssam	if (ifp != NULL)
352164426Ssam		if_free(ifp);
353164426Ssam	return error;
354164426Ssam}
355164426Ssam
356164426Ssamstatic int
357164426Ssamnpe_detach(device_t dev)
358164426Ssam{
359164426Ssam	struct npe_softc *sc = device_get_softc(dev);
360164426Ssam	struct ifnet *ifp = sc->sc_ifp;
361164426Ssam
362164426Ssam#ifdef DEVICE_POLLING
363164426Ssam	if (ifp->if_capenable & IFCAP_POLLING)
364164426Ssam		ether_poll_deregister(ifp);
365164426Ssam#endif
366164426Ssam	npestop(sc);
367164426Ssam	if (ifp != NULL) {
368164426Ssam		ether_ifdetach(ifp);
369164426Ssam		if_free(ifp);
370164426Ssam	}
371164426Ssam	NPE_LOCK_DESTROY(sc);
372164426Ssam	npe_deactivate(dev);
373164426Ssam	if (sc->sc_npe != NULL)
374164426Ssam		ixpnpe_detach(sc->sc_npe);
375164426Ssam	return 0;
376164426Ssam}
377164426Ssam
378164426Ssam/*
379164426Ssam * Compute and install the multicast filter.
380164426Ssam */
381164426Ssamstatic void
382164426Ssamnpe_setmcast(struct npe_softc *sc)
383164426Ssam{
384164426Ssam	struct ifnet *ifp = sc->sc_ifp;
385164426Ssam	uint8_t mask[ETHER_ADDR_LEN], addr[ETHER_ADDR_LEN];
386164426Ssam	int i;
387164426Ssam
388164426Ssam	if (ifp->if_flags & IFF_PROMISC) {
389164426Ssam		memset(mask, 0, ETHER_ADDR_LEN);
390164426Ssam		memset(addr, 0, ETHER_ADDR_LEN);
391164426Ssam	} else if (ifp->if_flags & IFF_ALLMULTI) {
392164426Ssam		static const uint8_t allmulti[ETHER_ADDR_LEN] =
393164426Ssam		    { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
394164426Ssam		memcpy(mask, allmulti, ETHER_ADDR_LEN);
395164426Ssam		memcpy(addr, allmulti, ETHER_ADDR_LEN);
396164426Ssam	} else {
397164426Ssam		uint8_t clr[ETHER_ADDR_LEN], set[ETHER_ADDR_LEN];
398164426Ssam		struct ifmultiaddr *ifma;
399164426Ssam		const uint8_t *mac;
400164426Ssam
401164426Ssam		memset(clr, 0, ETHER_ADDR_LEN);
402164426Ssam		memset(set, 0xff, ETHER_ADDR_LEN);
403164426Ssam
404164426Ssam		IF_ADDR_LOCK(ifp);
405164426Ssam		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
406164426Ssam			if (ifma->ifma_addr->sa_family != AF_LINK)
407164426Ssam				continue;
408164426Ssam			mac = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
409164426Ssam			for (i = 0; i < ETHER_ADDR_LEN; i++) {
410164426Ssam				clr[i] |= mac[i];
411164426Ssam				set[i] &= mac[i];
412164426Ssam			}
413164426Ssam		}
414164426Ssam		IF_ADDR_UNLOCK(ifp);
415164426Ssam
416164426Ssam		for (i = 0; i < ETHER_ADDR_LEN; i++) {
417164426Ssam			mask[i] = set[i] | ~clr[i];
418164426Ssam			addr[i] = set[i];
419164426Ssam		}
420164426Ssam	}
421164426Ssam
422164426Ssam	/*
423164426Ssam	 * Write the mask and address registers.
424164426Ssam	 */
425164426Ssam	for (i = 0; i < ETHER_ADDR_LEN; i++) {
426164426Ssam		WR4(sc, NPE_MAC_ADDR_MASK(i), mask[i]);
427164426Ssam		WR4(sc, NPE_MAC_ADDR(i), addr[i]);
428164426Ssam	}
429164426Ssam}
430164426Ssam
431164426Ssamstatic void
432164426Ssamnpe_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
433164426Ssam{
434164426Ssam	struct npe_softc *sc;
435164426Ssam
436164426Ssam	if (error != 0)
437164426Ssam		return;
438164426Ssam	sc = (struct npe_softc *)arg;
439164426Ssam	sc->buf_phys = segs[0].ds_addr;
440164426Ssam}
441164426Ssam
442164426Ssamstatic int
443164426Ssamnpe_dma_setup(struct npe_softc *sc, struct npedma *dma,
444164426Ssam	const char *name, int nbuf, int maxseg)
445164426Ssam{
446164426Ssam	int error, i;
447164426Ssam
448164426Ssam	memset(dma, 0, sizeof(dma));
449164426Ssam
450164426Ssam	dma->name = name;
451164426Ssam	dma->nbuf = nbuf;
452164426Ssam
453164426Ssam	/* DMA tag for mapped mbufs  */
454166064Scognet	error = bus_dma_tag_create(ixp425_softc->sc_dmat, 1, 0,
455164426Ssam	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
456164426Ssam	    MCLBYTES, maxseg, MCLBYTES, 0,
457164426Ssam	    busdma_lock_mutex, &sc->sc_mtx, &dma->mtag);
458164426Ssam	if (error != 0) {
459164426Ssam		device_printf(sc->sc_dev, "unable to create %s mbuf dma tag, "
460164426Ssam		     "error %u\n", dma->name, error);
461164426Ssam		return error;
462164426Ssam	}
463164426Ssam
464164426Ssam	/* DMA tag and map for the NPE buffers */
465166064Scognet	error = bus_dma_tag_create(ixp425_softc->sc_dmat, sizeof(uint32_t), 0,
466164426Ssam	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
467164426Ssam	    nbuf * sizeof(struct npehwbuf), 1,
468164426Ssam	    nbuf * sizeof(struct npehwbuf), 0,
469164426Ssam	    busdma_lock_mutex, &sc->sc_mtx, &dma->buf_tag);
470164426Ssam	if (error != 0) {
471164426Ssam		device_printf(sc->sc_dev,
472164426Ssam		    "unable to create %s npebuf dma tag, error %u\n",
473164426Ssam		    dma->name, error);
474164426Ssam		return error;
475164426Ssam	}
476164426Ssam	/* XXX COHERENT for now */
477164426Ssam	if (bus_dmamem_alloc(dma->buf_tag, (void **)&dma->hwbuf,
478164426Ssam	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
479164426Ssam	    &dma->buf_map) != 0) {
480164426Ssam		device_printf(sc->sc_dev,
481164426Ssam		     "unable to allocate memory for %s h/w buffers, error %u\n",
482164426Ssam		     dma->name, error);
483164426Ssam		return error;
484164426Ssam	}
485164426Ssam	/* XXX M_TEMP */
486164426Ssam	dma->buf = malloc(nbuf * sizeof(struct npebuf), M_TEMP, M_NOWAIT | M_ZERO);
487164426Ssam	if (dma->buf == NULL) {
488164426Ssam		device_printf(sc->sc_dev,
489164426Ssam		     "unable to allocate memory for %s s/w buffers\n",
490164426Ssam		     dma->name);
491164426Ssam		return error;
492164426Ssam	}
493164426Ssam	if (bus_dmamap_load(dma->buf_tag, dma->buf_map,
494164426Ssam	    dma->hwbuf, nbuf*sizeof(struct npehwbuf), npe_getaddr, sc, 0) != 0) {
495164426Ssam		device_printf(sc->sc_dev,
496164426Ssam		     "unable to map memory for %s h/w buffers, error %u\n",
497164426Ssam		     dma->name, error);
498164426Ssam		return error;
499164426Ssam	}
500164426Ssam	dma->buf_phys = sc->buf_phys;
501164426Ssam	for (i = 0; i < dma->nbuf; i++) {
502164426Ssam		struct npebuf *npe = &dma->buf[i];
503164426Ssam		struct npehwbuf *hw = &dma->hwbuf[i];
504164426Ssam
505164426Ssam		/* calculate offset to shared area */
506164426Ssam		npe->ix_neaddr = dma->buf_phys +
507164426Ssam			((uintptr_t)hw - (uintptr_t)dma->hwbuf);
508164426Ssam		KASSERT((npe->ix_neaddr & 0x1f) == 0,
509164426Ssam		    ("ixpbuf misaligned, PA 0x%x", npe->ix_neaddr));
510164426Ssam		error = bus_dmamap_create(dma->mtag, BUS_DMA_NOWAIT,
511164426Ssam				&npe->ix_map);
512164426Ssam		if (error != 0) {
513164426Ssam			device_printf(sc->sc_dev,
514164426Ssam			     "unable to create dmamap for %s buffer %u, "
515164426Ssam			     "error %u\n", dma->name, i, error);
516164426Ssam			return error;
517164426Ssam		}
518164426Ssam		npe->ix_hw = hw;
519164426Ssam	}
520164426Ssam	bus_dmamap_sync(dma->buf_tag, dma->buf_map, BUS_DMASYNC_PREWRITE);
521164426Ssam	return 0;
522164426Ssam}
523164426Ssam
524164426Ssamstatic void
525164426Ssamnpe_dma_destroy(struct npe_softc *sc, struct npedma *dma)
526164426Ssam{
527164426Ssam	int i;
528164426Ssam
529164426Ssam	if (dma->hwbuf != NULL) {
530164426Ssam		for (i = 0; i < dma->nbuf; i++) {
531164426Ssam			struct npebuf *npe = &dma->buf[i];
532164426Ssam			bus_dmamap_destroy(dma->mtag, npe->ix_map);
533164426Ssam		}
534164426Ssam		bus_dmamap_unload(dma->buf_tag, dma->buf_map);
535164426Ssam		bus_dmamem_free(dma->buf_tag, dma->hwbuf, dma->buf_map);
536164426Ssam		bus_dmamap_destroy(dma->buf_tag, dma->buf_map);
537164426Ssam	}
538164426Ssam	if (dma->buf != NULL)
539164426Ssam		free(dma->buf, M_TEMP);
540164426Ssam	if (dma->buf_tag)
541164426Ssam		bus_dma_tag_destroy(dma->buf_tag);
542164426Ssam	if (dma->mtag)
543164426Ssam		bus_dma_tag_destroy(dma->mtag);
544164426Ssam	memset(dma, 0, sizeof(*dma));
545164426Ssam}
546164426Ssam
547164426Ssamstatic int
548164426Ssamnpe_activate(device_t dev)
549164426Ssam{
550164426Ssam	struct npe_softc * sc = device_get_softc(dev);
551164426Ssam	int unit = device_get_unit(dev);
552164426Ssam	int error, i;
553169954Ssam	uint32_t imageid;
554164426Ssam
555169954Ssam	/*
556169954Ssam	 * Load NPE firmware and start it running.  We assume
557169954Ssam	 * that minor version bumps remain compatible so probe
558169954Ssam	 * the firmware image starting with the expected version
559169954Ssam	 * and then bump the minor version up to the max.
560169954Ssam	 */
561169954Ssam	imageid = npeconfig[unit].imageid;
562169954Ssam	for (;;) {
563169954Ssam		error = ixpnpe_init(sc->sc_npe, "npe_fw", imageid);
564169954Ssam		if (error == 0)
565169954Ssam			break;
566169954Ssam		/* ESRCH is returned when the requested image is not present */
567169954Ssam		if (error != ESRCH)
568169954Ssam			return error;
569169954Ssam		/* bump the minor version up to the max possible */
570169954Ssam		if (NPEIMAGE_MINOR(imageid) == 0xff)
571169954Ssam			return error;
572169954Ssam		imageid++;
573169954Ssam	}
574164426Ssam
575164426Ssam	if (bus_space_map(sc->sc_iot, npeconfig[unit].regbase,
576164426Ssam	    npeconfig[unit].regsize, 0, &sc->sc_ioh)) {
577164426Ssam		device_printf(dev, "Cannot map registers 0x%x:0x%x\n",
578164426Ssam		    npeconfig[unit].regbase, npeconfig[unit].regsize);
579164426Ssam		return ENOMEM;
580164426Ssam	}
581164426Ssam
582164426Ssam	if (npeconfig[unit].miibase != npeconfig[unit].regbase) {
583164426Ssam		/*
584164426Ssam		 * The PHY's are only accessible from one MAC (it appears)
585164426Ssam		 * so for other MAC's setup an additional mapping for
586164426Ssam		 * frobbing the PHY registers.
587164426Ssam		 */
588164426Ssam		if (bus_space_map(sc->sc_iot, npeconfig[unit].miibase,
589164426Ssam		    npeconfig[unit].miisize, 0, &sc->sc_miih)) {
590164426Ssam			device_printf(dev,
591164426Ssam			    "Cannot map MII registers 0x%x:0x%x\n",
592164426Ssam			    npeconfig[unit].miibase, npeconfig[unit].miisize);
593164426Ssam			return ENOMEM;
594164426Ssam		}
595164426Ssam	} else
596164426Ssam		sc->sc_miih = sc->sc_ioh;
597164426Ssam	error = npe_dma_setup(sc, &sc->txdma, "tx", npe_txbuf, NPE_MAXSEG);
598164426Ssam	if (error != 0)
599164426Ssam		return error;
600164426Ssam	error = npe_dma_setup(sc, &sc->rxdma, "rx", npe_rxbuf, 1);
601164426Ssam	if (error != 0)
602164426Ssam		return error;
603164426Ssam
604164426Ssam	/* setup statistics block */
605166064Scognet	error = bus_dma_tag_create(ixp425_softc->sc_dmat, sizeof(uint32_t), 0,
606164426Ssam	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
607164426Ssam	    sizeof(struct npestats), 1, sizeof(struct npestats), 0,
608164426Ssam	    busdma_lock_mutex, &sc->sc_mtx, &sc->sc_stats_tag);
609164426Ssam	if (error != 0) {
610164426Ssam		device_printf(sc->sc_dev, "unable to create stats tag, "
611164426Ssam		     "error %u\n", error);
612164426Ssam		return error;
613164426Ssam	}
614164426Ssam	if (bus_dmamem_alloc(sc->sc_stats_tag, (void **)&sc->sc_stats,
615164426Ssam	    BUS_DMA_NOWAIT, &sc->sc_stats_map) != 0) {
616164426Ssam		device_printf(sc->sc_dev,
617164426Ssam		     "unable to allocate memory for stats block, error %u\n",
618164426Ssam		     error);
619164426Ssam		return error;
620164426Ssam	}
621164426Ssam	if (bus_dmamap_load(sc->sc_stats_tag, sc->sc_stats_map,
622164426Ssam	    sc->sc_stats, sizeof(struct npestats), npe_getaddr, sc, 0) != 0) {
623164426Ssam		device_printf(sc->sc_dev,
624164426Ssam		     "unable to load memory for stats block, error %u\n",
625164426Ssam		     error);
626164426Ssam		return error;
627164426Ssam	}
628164426Ssam	sc->sc_stats_phys = sc->buf_phys;
629164426Ssam
630164426Ssam	/* XXX disable half-bridge LEARNING+FILTERING feature */
631164426Ssam
632164426Ssam	/*
633164426Ssam	 * Setup h/w rx/tx queues.  There are four q's:
634164426Ssam	 *   rx		inbound q of rx'd frames
635164426Ssam	 *   rx_free	pool of ixpbuf's for receiving frames
636164426Ssam	 *   tx		outbound q of frames to send
637164426Ssam	 *   tx_done	q of tx frames that have been processed
638164426Ssam	 *
639164426Ssam	 * The NPE handles the actual tx/rx process and the q manager
640164426Ssam	 * handles the queues.  The driver just writes entries to the
641164426Ssam	 * q manager mailbox's and gets callbacks when there are rx'd
642164426Ssam	 * frames to process or tx'd frames to reap.  These callbacks
643164426Ssam	 * are controlled by the q configurations; e.g. we get a
644164426Ssam	 * callback when tx_done has 2 or more frames to process and
645164426Ssam	 * when the rx q has at least one frame.  These setings can
646164426Ssam	 * changed at the time the q is configured.
647164426Ssam	 */
648164426Ssam	sc->rx_qid = npeconfig[unit].rx_qid;
649164426Ssam	ixpqmgr_qconfig(sc->rx_qid, npe_rxbuf, 0,  1,
650164426Ssam		IX_QMGR_Q_SOURCE_ID_NOT_E, npe_rxdone, sc);
651164426Ssam	sc->rx_freeqid = npeconfig[unit].rx_freeqid;
652164426Ssam	ixpqmgr_qconfig(sc->rx_freeqid,	npe_rxbuf, 0, npe_rxbuf/2, 0, NULL, sc);
653164426Ssam	/* tell the NPE to direct all traffic to rx_qid */
654164426Ssam#if 0
655164426Ssam	for (i = 0; i < 8; i++)
656164426Ssam#else
657164426Ssamdevice_printf(sc->sc_dev, "remember to fix rx q setup\n");
658164426Ssam	for (i = 0; i < 4; i++)
659164426Ssam#endif
660164426Ssam		npe_setrxqosentry(sc, i, 0, sc->rx_qid);
661164426Ssam
662164426Ssam	sc->tx_qid = npeconfig[unit].tx_qid;
663164426Ssam	sc->tx_doneqid = npeconfig[unit].tx_doneqid;
664164426Ssam	ixpqmgr_qconfig(sc->tx_qid, npe_txbuf, 0, npe_txbuf, 0, NULL, sc);
665164426Ssam	if (tx_doneqid == -1) {
666164426Ssam		ixpqmgr_qconfig(sc->tx_doneqid,	npe_txbuf, 0,  2,
667164426Ssam			IX_QMGR_Q_SOURCE_ID_NOT_E, npe_txdone, sc);
668164426Ssam		tx_doneqid = sc->tx_doneqid;
669164426Ssam	}
670164426Ssam
671164426Ssam	KASSERT(npes[npeconfig[unit].npeid] == NULL,
672164426Ssam	    ("npe %u already setup", npeconfig[unit].npeid));
673164426Ssam	npes[npeconfig[unit].npeid] = sc;
674164426Ssam
675164426Ssam	return 0;
676164426Ssam}
677164426Ssam
678164426Ssamstatic void
679164426Ssamnpe_deactivate(device_t dev)
680164426Ssam{
681164426Ssam	struct npe_softc *sc = device_get_softc(dev);
682164426Ssam	int unit = device_get_unit(dev);
683164426Ssam
684164426Ssam	npes[npeconfig[unit].npeid] = NULL;
685164426Ssam
686164426Ssam	/* XXX disable q's */
687164426Ssam	if (sc->sc_npe != NULL)
688164426Ssam		ixpnpe_stop(sc->sc_npe);
689164426Ssam	if (sc->sc_stats != NULL) {
690164426Ssam		bus_dmamap_unload(sc->sc_stats_tag, sc->sc_stats_map);
691164426Ssam		bus_dmamem_free(sc->sc_stats_tag, sc->sc_stats,
692164426Ssam			sc->sc_stats_map);
693164426Ssam		bus_dmamap_destroy(sc->sc_stats_tag, sc->sc_stats_map);
694164426Ssam	}
695164426Ssam	if (sc->sc_stats_tag != NULL)
696164426Ssam		bus_dma_tag_destroy(sc->sc_stats_tag);
697164426Ssam	npe_dma_destroy(sc, &sc->txdma);
698164426Ssam	npe_dma_destroy(sc, &sc->rxdma);
699164426Ssam	bus_generic_detach(sc->sc_dev);
700164426Ssam	if (sc->sc_mii)
701164426Ssam		device_delete_child(sc->sc_dev, sc->sc_mii);
702164426Ssam#if 0
703164426Ssam	/* XXX sc_ioh and sc_miih */
704164426Ssam	if (sc->mem_res)
705164426Ssam		bus_release_resource(dev, SYS_RES_IOPORT,
706164426Ssam		    rman_get_rid(sc->mem_res), sc->mem_res);
707164426Ssam	sc->mem_res = 0;
708164426Ssam#endif
709164426Ssam}
710164426Ssam
711164426Ssam/*
712164426Ssam * Change media according to request.
713164426Ssam */
714164426Ssamstatic int
715164426Ssamnpe_ifmedia_update(struct ifnet *ifp)
716164426Ssam{
717164426Ssam	struct npe_softc *sc = ifp->if_softc;
718164426Ssam	struct mii_data *mii;
719164426Ssam
720164426Ssam	mii = device_get_softc(sc->sc_mii);
721164426Ssam	NPE_LOCK(sc);
722164426Ssam	mii_mediachg(mii);
723164426Ssam	/* XXX push state ourself? */
724164426Ssam	NPE_UNLOCK(sc);
725164426Ssam	return (0);
726164426Ssam}
727164426Ssam
728164426Ssam/*
729164426Ssam * Notify the world which media we're using.
730164426Ssam */
731164426Ssamstatic void
732164426Ssamnpe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
733164426Ssam{
734164426Ssam	struct npe_softc *sc = ifp->if_softc;
735164426Ssam	struct mii_data *mii;
736164426Ssam
737164426Ssam	mii = device_get_softc(sc->sc_mii);
738164426Ssam	NPE_LOCK(sc);
739164426Ssam	mii_pollstat(mii);
740164426Ssam	ifmr->ifm_active = mii->mii_media_active;
741164426Ssam	ifmr->ifm_status = mii->mii_media_status;
742164426Ssam	NPE_UNLOCK(sc);
743164426Ssam}
744164426Ssam
/*
 * Fold the NPE-maintained statistics block (big-endian counters in
 * sc->sc_stats, already synced from DMA by the caller) into the
 * driver's MIB counters and the ifnet error/collision totals.
 */
static void
npe_addstats(struct npe_softc *sc)
{
#define	MIBADD(x)	sc->mibdata.x += be32toh(ns->x)
	struct ifnet *ifp = sc->sc_ifp;
	struct npestats *ns = sc->sc_stats;

	MIBADD(dot3StatsAlignmentErrors);
	MIBADD(dot3StatsFCSErrors);
	MIBADD(dot3StatsSingleCollisionFrames);
	MIBADD(dot3StatsMultipleCollisionFrames);
	MIBADD(dot3StatsDeferredTransmissions);
	MIBADD(dot3StatsLateCollisions);
	MIBADD(dot3StatsExcessiveCollisions);
	MIBADD(dot3StatsInternalMacTransmitErrors);
	MIBADD(dot3StatsCarrierSenseErrors);
	/* no 1:1 NPE counter; fold both large-frame discard counters in */
	sc->mibdata.dot3StatsFrameTooLongs +=
	      be32toh(ns->RxLargeFramesDiscards)
	    + be32toh(ns->TxLargeFrameDiscards);
	MIBADD(dot3StatsInternalMacReceiveErrors);
	sc->mibdata.dot3StatsMissedFrames +=
	      be32toh(ns->RxOverrunDiscards)
	    + be32toh(ns->RxUnderflowEntryDiscards);

	ifp->if_oerrors +=
		  be32toh(ns->dot3StatsInternalMacTransmitErrors)
		+ be32toh(ns->dot3StatsCarrierSenseErrors)
		+ be32toh(ns->TxVLANIdFilterDiscards)
		;
	ifp->if_ierrors += be32toh(ns->dot3StatsFCSErrors)
		+ be32toh(ns->dot3StatsInternalMacReceiveErrors)
		+ be32toh(ns->RxOverrunDiscards)
		+ be32toh(ns->RxUnderflowEntryDiscards)
		;
	ifp->if_collisions +=
		  be32toh(ns->dot3StatsSingleCollisionFrames)
		+ be32toh(ns->dot3StatsMultipleCollisionFrames)
		;
#undef MIBADD
}
785164426Ssam
/*
 * Periodic callout (every sc_tickinterval seconds, softc lock held):
 * harvest the statistics reply from the previous tick, kick off the
 * next statistics request, tick the PHY, and run the tx watchdog.
 */
static void
npe_tick(void *xsc)
{
#define	ACK	(NPE_RESETSTATS << NPE_MAC_MSGID_SHL)
	struct npe_softc *sc = xsc;
	struct mii_data *mii = device_get_softc(sc->sc_mii);
	uint32_t msg[2];

	NPE_ASSERT_LOCKED(sc);

	/*
	 * NB: to avoid sleeping with the softc lock held we
	 * split the NPE msg processing into two parts.  The
	 * request for statistics is sent w/o waiting for a
	 * reply and then on the next tick we retrieve the
	 * results.  This works because npe_tick is the only
	 * code that talks via the mailbox's (except at setup).
	 * This likely can be handled better.
	 */
	if (ixpnpe_recvmsg(sc->sc_npe, msg) == 0 && msg[0] == ACK) {
		/* stats block was DMA'd by the NPE; sync before reading */
		bus_dmamap_sync(sc->sc_stats_tag, sc->sc_stats_map,
		    BUS_DMASYNC_POSTREAD);
		npe_addstats(sc);
	}
	npe_updatestats(sc);	/* request fresh stats; reply read next tick */
	mii_tick(mii);

	npewatchdog(sc);

	/* schedule next poll */
	callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc);
#undef ACK
}
819164426Ssam
820164426Ssamstatic void
821164426Ssamnpe_setmac(struct npe_softc *sc, u_char *eaddr)
822164426Ssam{
823164426Ssam	WR4(sc, NPE_MAC_UNI_ADDR_1, eaddr[0]);
824164426Ssam	WR4(sc, NPE_MAC_UNI_ADDR_2, eaddr[1]);
825164426Ssam	WR4(sc, NPE_MAC_UNI_ADDR_3, eaddr[2]);
826164426Ssam	WR4(sc, NPE_MAC_UNI_ADDR_4, eaddr[3]);
827164426Ssam	WR4(sc, NPE_MAC_UNI_ADDR_5, eaddr[4]);
828164426Ssam	WR4(sc, NPE_MAC_UNI_ADDR_6, eaddr[5]);
829164426Ssam
830164426Ssam}
831164426Ssam
832164426Ssamstatic void
833164426Ssamnpe_getmac(struct npe_softc *sc, u_char *eaddr)
834164426Ssam{
835164426Ssam	/* NB: the unicast address appears to be loaded from EEPROM on reset */
836164426Ssam	eaddr[0] = RD4(sc, NPE_MAC_UNI_ADDR_1) & 0xff;
837164426Ssam	eaddr[1] = RD4(sc, NPE_MAC_UNI_ADDR_2) & 0xff;
838164426Ssam	eaddr[2] = RD4(sc, NPE_MAC_UNI_ADDR_3) & 0xff;
839164426Ssam	eaddr[3] = RD4(sc, NPE_MAC_UNI_ADDR_4) & 0xff;
840164426Ssam	eaddr[4] = RD4(sc, NPE_MAC_UNI_ADDR_5) & 0xff;
841164426Ssam	eaddr[5] = RD4(sc, NPE_MAC_UNI_ADDR_6) & 0xff;
842164426Ssam}
843164426Ssam
/* List of tx buffers reclaimed from the txdone queue for one NPE. */
struct txdone {
	struct npebuf *head;	/* first reclaimed buffer */
	struct npebuf **tail;	/* last ix_next link; enables O(1) append */
	int count;		/* number of buffers on the list */
};
849164426Ssam
/*
 * Splice a list of reclaimed tx buffers back onto the softc free list,
 * credit the completed packets, and restart transmission.
 */
static __inline void
npe_txdone_finish(struct npe_softc *sc, const struct txdone *td)
{
	struct ifnet *ifp = sc->sc_ifp;

	NPE_LOCK(sc);
	/* prepend td's list: old free list hangs off td's tail link */
	*td->tail = sc->tx_free;
	sc->tx_free = td->head;
	/*
	 * We're no longer busy, so clear the busy flag and call the
	 * start routine to xmit more packets.
	 */
	ifp->if_opackets += td->count;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->npe_watchdog_timer = 0;
	npestart_locked(ifp);
	NPE_UNLOCK(sc);
}
868164426Ssam
869164426Ssam/*
870164426Ssam * Q manager callback on tx done queue.  Reap mbufs
871164426Ssam * and return tx buffers to the free list.  Finally
872164426Ssam * restart output.  Note the microcode has only one
873164426Ssam * txdone q wired into it so we must use the NPE ID
874164426Ssam * returned with each npehwbuf to decide where to
875164426Ssam * send buffers.
876164426Ssam */
static void
npe_txdone(int qid, void *arg)
{
/* map an NPE physical buffer address back to its npebuf descriptor */
#define	P2V(a, dma) \
	&(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
	struct npe_softc *sc0 = arg;
	struct npe_softc *sc;
	struct npebuf *npe;
	struct txdone *td, q[NPE_MAX];
	uint32_t entry;

	/* XXX no NPE-A support */
	q[NPE_B].tail = &q[NPE_B].head; q[NPE_B].count = 0;
	q[NPE_C].tail = &q[NPE_C].head; q[NPE_C].count = 0;
	/* XXX max # at a time? */
	while (ixpqmgr_qread(qid, &entry) == 0) {
		DPRINTF(sc0, "%s: entry 0x%x NPE %u port %u\n",
		    __func__, entry, NPE_QM_Q_NPE(entry), NPE_QM_Q_PORT(entry));

		/* entry encodes which NPE this buffer belongs to */
		sc = npes[NPE_QM_Q_NPE(entry)];
		npe = P2V(NPE_QM_Q_ADDR(entry), &sc->txdma);
		m_freem(npe->ix_m);
		npe->ix_m = NULL;

		/* accumulate per-NPE so the lock is taken once per NPE below */
		td = &q[NPE_QM_Q_NPE(entry)];
		*td->tail = npe;
		td->tail = &npe->ix_next;
		td->count++;
	}

	if (q[NPE_B].count)
		npe_txdone_finish(npes[NPE_B], &q[NPE_B]);
	if (q[NPE_C].count)
		npe_txdone_finish(npes[NPE_C], &q[NPE_C]);
#undef P2V
}
913164426Ssam
/*
 * (Re)attach an mbuf cluster to an rx npebuf and fill in the h/w
 * descriptor.  If m is NULL a fresh cluster is allocated; otherwise
 * the caller's mbuf is recycled.  Returns 0 or an errno.
 */
static int
npe_rxbuf_init(struct npe_softc *sc, struct npebuf *npe, struct mbuf *m)
{
	bus_dma_segment_t segs[1];
	struct npedma *dma = &sc->rxdma;
	struct npehwbuf *hw;
	int error, nseg;

	if (m == NULL) {
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			return ENOBUFS;
	}
	KASSERT(m->m_ext.ext_size >= 1536 + ETHER_ALIGN,
		("ext_size %d", m->m_ext.ext_size));
	m->m_pkthdr.len = m->m_len = 1536;
	/* backload payload and align ip hdr */
	m->m_data = m->m_ext.ext_buf + (m->m_ext.ext_size - (1536+ETHER_ALIGN));
	error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map, m,
			segs, &nseg, 0);
	if (error != 0) {
		m_freem(m);
		return error;
	}
	hw = npe->ix_hw;
	hw->ix_ne[0].data = htobe32(segs[0].ds_addr);
	/* NB: NPE requires length be a multiple of 64 */
	/* NB: buffer length is shifted in word */
	hw->ix_ne[0].len = htobe32(segs[0].ds_len << 16);
	hw->ix_ne[0].next = 0;
	npe->ix_m = m;
	/* Flush the memory in the mbuf */
	bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREREAD);
	return 0;
}
949164426Ssam
950164426Ssam/*
951164426Ssam * RX q processing for a specific NPE.  Claim entries
952164426Ssam * from the hardware queue and pass the frames up the
953164426Ssam * stack. Pass the rx buffers to the free list.
954164426Ssam */
static void
npe_rxdone(int qid, void *arg)
{
/* map an NPE physical buffer address back to its npebuf descriptor */
#define	P2V(a, dma) \
	&(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
	struct npe_softc *sc = arg;
	struct npedma *dma = &sc->rxdma;
	uint32_t entry;

	while (ixpqmgr_qread(qid, &entry) == 0) {
		struct npebuf *npe = P2V(NPE_QM_Q_ADDR(entry), dma);
		struct mbuf *m;

		DPRINTF(sc, "%s: entry 0x%x neaddr 0x%x ne_len 0x%x\n",
		    __func__, entry, npe->ix_neaddr, npe->ix_hw->ix_ne[0].len);
		/*
		 * Allocate a new mbuf to replenish the rx buffer.
		 * If doing so fails we drop the rx'd frame so we
		 * can reuse the previous mbuf.  When we're able to
		 * allocate a new mbuf dispatch the mbuf w/ rx'd
		 * data up the stack and replace it with the newly
		 * allocated one.
		 */
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m != NULL) {
			struct mbuf *mrx = npe->ix_m;
			struct npehwbuf *hw = npe->ix_hw;
			struct ifnet *ifp = sc->sc_ifp;

			/* Flush mbuf memory for rx'd data */
			bus_dmamap_sync(dma->mtag, npe->ix_map,
			    BUS_DMASYNC_POSTREAD);

			/* XXX flush hw buffer; works now 'cuz coherent */
			/* set m_len etc. per rx frame size */
			mrx->m_len = be32toh(hw->ix_ne[0].len) & 0xffff;
			mrx->m_pkthdr.len = mrx->m_len;
			mrx->m_pkthdr.rcvif = ifp;
			mrx->m_flags |= M_HASFCS;	/* frame includes FCS */

			ifp->if_ipackets++;
			ifp->if_input(ifp, mrx);
		} else {
			/* discard frame and re-use mbuf */
			m = npe->ix_m;
		}
		if (npe_rxbuf_init(sc, npe, m) == 0) {
			/* return npe buf to rx free list */
			ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
		} else {
			/* XXX should not happen */
		}
	}
#undef P2V
}
1010164426Ssam
#ifdef DEVICE_POLLING
/*
 * Polled-mode entry point: drain the rx and txdone queues directly
 * instead of relying on qmgr callbacks (which are disabled while
 * IFCAP_POLLING is enabled; see npeioctl).
 */
static void
npe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct npe_softc *sc = ifp->if_softc;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		npe_rxdone(sc->rx_qid, sc);
		npe_txdone(sc->tx_doneqid, sc);	/* XXX polls both NPE's */
	}
}
#endif /* DEVICE_POLLING */
1023164426Ssam
1024164426Ssamstatic void
1025164426Ssamnpe_startxmit(struct npe_softc *sc)
1026164426Ssam{
1027164426Ssam	struct npedma *dma = &sc->txdma;
1028164426Ssam	int i;
1029164426Ssam
1030164426Ssam	NPE_ASSERT_LOCKED(sc);
1031164426Ssam	sc->tx_free = NULL;
1032164426Ssam	for (i = 0; i < dma->nbuf; i++) {
1033164426Ssam		struct npebuf *npe = &dma->buf[i];
1034164426Ssam		if (npe->ix_m != NULL) {
1035164426Ssam			/* NB: should not happen */
1036164426Ssam			device_printf(sc->sc_dev,
1037164426Ssam			    "%s: free mbuf at entry %u\n", __func__, i);
1038164426Ssam			m_freem(npe->ix_m);
1039164426Ssam		}
1040164426Ssam		npe->ix_m = NULL;
1041164426Ssam		npe->ix_next = sc->tx_free;
1042164426Ssam		sc->tx_free = npe;
1043164426Ssam	}
1044164426Ssam}
1045164426Ssam
1046164426Ssamstatic void
1047164426Ssamnpe_startrecv(struct npe_softc *sc)
1048164426Ssam{
1049164426Ssam	struct npedma *dma = &sc->rxdma;
1050164426Ssam	struct npebuf *npe;
1051164426Ssam	int i;
1052164426Ssam
1053164426Ssam	NPE_ASSERT_LOCKED(sc);
1054164426Ssam	for (i = 0; i < dma->nbuf; i++) {
1055164426Ssam		npe = &dma->buf[i];
1056164426Ssam		npe_rxbuf_init(sc, npe, npe->ix_m);
1057164426Ssam		/* set npe buf on rx free list */
1058164426Ssam		ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
1059164426Ssam	}
1060164426Ssam}
1061164426Ssam
1062164426Ssam/*
1063164426Ssam * Reset and initialize the chip
1064164426Ssam */
/*
 * Reset and (re)program the MAC, reload the tx/rx rings, mark the
 * interface running, and start the periodic tick.  Caller holds the
 * softc lock.  The register write sequence below is order-sensitive.
 */
static void
npeinit_locked(void *xsc)
{
	struct npe_softc *sc = xsc;
	struct ifnet *ifp = sc->sc_ifp;

	NPE_ASSERT_LOCKED(sc);
if (ifp->if_drv_flags & IFF_DRV_RUNNING) return;/*XXX*/

	/*
	 * Reset MAC core.
	 */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
	DELAY(NPE_MAC_RESET_DELAY);
	/* configure MAC to generate MDC clock */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);

	/* disable transmitter and reciver in the MAC */
 	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
 	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);

	/*
	 * Set the MAC core registers.
	 */
	WR4(sc, NPE_MAC_INT_CLK_THRESH, 0x1);	/* clock ratio: for ipx4xx */
	WR4(sc, NPE_MAC_TX_CNTRL2,	0xf);	/* max retries */
	WR4(sc, NPE_MAC_RANDOM_SEED,	0x8);	/* LFSR back-off seed */
	/* thresholds determined by NPE firmware FS */
	WR4(sc, NPE_MAC_THRESH_P_EMPTY,	0x12);
	WR4(sc, NPE_MAC_THRESH_P_FULL,	0x30);
	WR4(sc, NPE_MAC_BUF_SIZE_TX,	0x8);	/* tx fifo threshold (bytes) */
	WR4(sc, NPE_MAC_TX_DEFER,	0x15);	/* for single deferral */
	WR4(sc, NPE_MAC_RX_DEFER,	0x16);	/* deferral on inter-frame gap*/
	WR4(sc, NPE_MAC_TX_TWO_DEFER_1,	0x8);	/* for 2-part deferral */
	WR4(sc, NPE_MAC_TX_TWO_DEFER_2,	0x7);	/* for 2-part deferral */
	WR4(sc, NPE_MAC_SLOT_TIME,	0x80);	/* assumes MII mode */

	WR4(sc, NPE_MAC_TX_CNTRL1,
		  NPE_TX_CNTRL1_RETRY		/* retry failed xmits */
		| NPE_TX_CNTRL1_FCS_EN		/* append FCS */
		| NPE_TX_CNTRL1_2DEFER		/* 2-part deferal */
		| NPE_TX_CNTRL1_PAD_EN);	/* pad runt frames */
	/* XXX pad strip? */
	WR4(sc, NPE_MAC_RX_CNTRL1,
		  NPE_RX_CNTRL1_CRC_EN		/* include CRC/FCS */
		| NPE_RX_CNTRL1_PAUSE_EN);	/* ena pause frame handling */
	WR4(sc, NPE_MAC_RX_CNTRL2, 0);

	/* program station address and multicast filter */
	npe_setmac(sc, IF_LLADDR(ifp));
	npe_setmcast(sc);

	/* reload tx free list and replenish the rx ring */
	npe_startxmit(sc);
	npe_startrecv(sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->npe_watchdog_timer = 0;		/* just in case */

	/* enable transmitter and reciver in the MAC */
 	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) | NPE_RX_CNTRL1_RX_EN);
 	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) | NPE_TX_CNTRL1_TX_EN);

	callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc);
}
1133164426Ssam
/* Serialized wrapper around npeinit_locked. */
static void
npeinit(void *xsc)
{
	struct npe_softc *sc = xsc;

	NPE_LOCK(sc);
	npeinit_locked(sc);
	NPE_UNLOCK(sc);
}
1142164426Ssam
1143164426Ssam/*
1144164426Ssam * Defragment an mbuf chain, returning at most maxfrags separate
1145164426Ssam * mbufs+clusters.  If this is not possible NULL is returned and
1146164426Ssam * the original mbuf chain is left in it's present (potentially
1147164426Ssam * modified) state.  We use two techniques: collapsing consecutive
1148164426Ssam * mbufs and replacing consecutive mbufs by a cluster.
1149164426Ssam */
static struct mbuf *
npe_defrag(struct mbuf *m0, int how, int maxfrags)
{
	struct mbuf *m, *n, *n2, **prev;
	u_int curfrags;

	/*
	 * Calculate the current number of frags.
	 */
	curfrags = 0;
	for (m = m0; m != NULL; m = m->m_next)
		curfrags++;
	/*
	 * First, try to collapse mbufs.  Note that we always collapse
	 * towards the front so we don't need to deal with moving the
	 * pkthdr.  This may be suboptimal if the first mbuf has much
	 * less data than the following.
	 */
	m = m0;
again:
	for (;;) {
		n = m->m_next;
		if (n == NULL)
			break;
		/* copy n's data into m's trailing space and unlink n */
		if ((m->m_flags & M_RDONLY) == 0 &&
		    n->m_len < M_TRAILINGSPACE(m)) {
			bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
				n->m_len);
			m->m_len += n->m_len;
			m->m_next = n->m_next;
			m_free(n);
			if (--curfrags <= maxfrags)
				return m0;
		} else
			m = n;
	}
	KASSERT(maxfrags > 1,
		("maxfrags %u, but normal collapse failed", maxfrags));
	/*
	 * Collapse consecutive mbufs to a cluster.
	 */
	prev = &m0->m_next;		/* NB: not the first mbuf */
	while ((n = *prev) != NULL) {
		if ((n2 = n->m_next) != NULL &&
		    n->m_len + n2->m_len < MCLBYTES) {
			m = m_getcl(how, MT_DATA, 0);
			if (m == NULL)
				goto bad;
			/* merge n and n2 into the fresh cluster m */
			bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
			bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
				n2->m_len);
			m->m_len = n->m_len + n2->m_len;
			m->m_next = n2->m_next;
			*prev = m;
			m_free(n);
			m_free(n2);
			if (--curfrags <= maxfrags)	/* +1 cl -2 mbufs */
				return m0;
			/*
			 * Still not there, try the normal collapse
			 * again before we allocate another cluster.
			 */
			goto again;
		}
		prev = &n->m_next;
	}
	/*
	 * No place where we can collapse to a cluster; punt.
	 * This can occur if, for example, you request 2 frags
	 * but the packet requires that both be clusters (we
	 * never reallocate the first mbuf to avoid moving the
	 * packet header).
	 */
bad:
	return NULL;
}
1226164426Ssam
1227164426Ssam/*
1228164426Ssam * Dequeue packets and place on the h/w transmit queue.
1229164426Ssam */
/*
 * Dequeue packets from the interface send queue, build scatter/gather
 * descriptor chains, and hand them to the NPE tx queue.  Caller holds
 * the softc lock.  Runs until the tx free list or the send queue is
 * exhausted.
 */
static void
npestart_locked(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;
	struct npebuf *npe;
	struct npehwbuf *hw;
	struct mbuf *m, *n;
	struct npedma *dma = &sc->txdma;
	bus_dma_segment_t segs[NPE_MAXSEG];
	int nseg, len, error, i;
	uint32_t next;

	NPE_ASSERT_LOCKED(sc);
	/* XXX can this happen? */
	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	while (sc->tx_free != NULL) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			/* XXX? */
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
			return;
		}
		npe = sc->tx_free;
		error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map,
		    m, segs, &nseg, 0);
		if (error == EFBIG) {
			/* too many fragments; compact the chain and retry */
			n = npe_defrag(m, M_DONTWAIT, NPE_MAXSEG);
			if (n == NULL) {
				if_printf(ifp, "%s: too many fragments %u\n",
				    __func__, nseg);
				m_freem(m);
				return;	/* XXX? */
			}
			m = n;
			error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map,
			    m, segs, &nseg, 0);
		}
		if (error != 0 || nseg == 0) {
			if_printf(ifp, "%s: error %u nseg %u\n",
			    __func__, error, nseg);
			m_freem(m);
			return;	/* XXX? */
		}
		sc->tx_free = npe->ix_next;

		bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREWRITE);

		/*
		 * Tap off here if there is a bpf listener.
		 */
		BPF_MTAP(ifp, m);

		/* fill the h/w descriptor chain, one entry per segment */
		npe->ix_m = m;
		hw = npe->ix_hw;
		len = m->m_pkthdr.len;
		next = npe->ix_neaddr + sizeof(hw->ix_ne[0]);
		for (i = 0; i < nseg; i++) {
			hw->ix_ne[i].data = htobe32(segs[i].ds_addr);
			hw->ix_ne[i].len = htobe32((segs[i].ds_len<<16) | len);
			hw->ix_ne[i].next = htobe32(next);

			len = 0;		/* zero for segments > 1 */
			next += sizeof(hw->ix_ne[0]);
		}
		hw->ix_ne[i-1].next = 0;	/* zero last in chain */
		/* XXX flush descriptor instead of using uncached memory */

		DPRINTF(sc, "%s: qwrite(%u, 0x%x) ne_data %x ne_len 0x%x\n",
		    __func__, sc->tx_qid, npe->ix_neaddr,
		    hw->ix_ne[0].data, hw->ix_ne[0].len);
		/* stick it on the tx q */
		/* XXX add vlan priority */
		ixpqmgr_qwrite(sc->tx_qid, npe->ix_neaddr);

		sc->npe_watchdog_timer = 5;
	}
	if (sc->tx_free == NULL)
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
}
1311164426Ssam
1312164426Ssamvoid
1313164426Ssamnpestart(struct ifnet *ifp)
1314164426Ssam{
1315164426Ssam	struct npe_softc *sc = ifp->if_softc;
1316164426Ssam	NPE_LOCK(sc);
1317164426Ssam	npestart_locked(ifp);
1318164426Ssam	NPE_UNLOCK(sc);
1319164426Ssam}
1320164426Ssam
1321164426Ssamstatic void
1322164426Ssamnpe_stopxmit(struct npe_softc *sc)
1323164426Ssam{
1324164426Ssam	struct npedma *dma = &sc->txdma;
1325164426Ssam	int i;
1326164426Ssam
1327164426Ssam	NPE_ASSERT_LOCKED(sc);
1328164426Ssam
1329164426Ssam	/* XXX qmgr */
1330164426Ssam	for (i = 0; i < dma->nbuf; i++) {
1331164426Ssam		struct npebuf *npe = &dma->buf[i];
1332164426Ssam
1333164426Ssam		if (npe->ix_m != NULL) {
1334164426Ssam			bus_dmamap_unload(dma->mtag, npe->ix_map);
1335164426Ssam			m_freem(npe->ix_m);
1336164426Ssam			npe->ix_m = NULL;
1337164426Ssam		}
1338164426Ssam	}
1339164426Ssam}
1340164426Ssam
1341164426Ssamstatic void
1342164426Ssamnpe_stoprecv(struct npe_softc *sc)
1343164426Ssam{
1344164426Ssam	struct npedma *dma = &sc->rxdma;
1345164426Ssam	int i;
1346164426Ssam
1347164426Ssam	NPE_ASSERT_LOCKED(sc);
1348164426Ssam
1349164426Ssam	/* XXX qmgr */
1350164426Ssam	for (i = 0; i < dma->nbuf; i++) {
1351164426Ssam		struct npebuf *npe = &dma->buf[i];
1352164426Ssam
1353164426Ssam		if (npe->ix_m != NULL) {
1354164426Ssam			bus_dmamap_unload(dma->mtag, npe->ix_map);
1355164426Ssam			m_freem(npe->ix_m);
1356164426Ssam			npe->ix_m = NULL;
1357164426Ssam		}
1358164426Ssam	}
1359164426Ssam}
1360164426Ssam
1361164426Ssam/*
1362164426Ssam * Turn off interrupts, and stop the nic.
1363164426Ssam */
/*
 * Turn off interrupts, and stop the nic.  Disables the MAC, cancels
 * the periodic tick, reclaims all ring mbufs, then hw-resets the MAC
 * core to a known state.  Caller holds the softc lock.
 */
void
npestop(struct npe_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	/*  disable transmitter and reciver in the MAC  */
 	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
 	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);

	sc->npe_watchdog_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	callout_stop(&sc->tick_ch);

	npe_stopxmit(sc);
	npe_stoprecv(sc);
	/* XXX go into loopback & drain q's? */
	/* XXX but beware of disabling tx above */

	/*
	 * The MAC core rx/tx disable may leave the MAC hardware in an
	 * unpredictable state. A hw reset is executed before resetting
	 * all the MAC parameters to a known value.
	 */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
	DELAY(NPE_MAC_RESET_DELAY);
	WR4(sc, NPE_MAC_INT_CLK_THRESH, NPE_MAC_INT_CLK_THRESH_DEFAULT);
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);
}
1395164426Ssam
/*
 * Tx watchdog, called from npe_tick with the softc lock held.
 * npe_watchdog_timer is armed (to 5) on each transmit and cleared on
 * tx completion; if it counts down to zero here the tx path is stuck
 * and the interface is reinitialized.
 */
void
npewatchdog(struct npe_softc *sc)
{
	NPE_ASSERT_LOCKED(sc);

	/* NB: condition has a side effect: it decrements the timer */
	if (sc->npe_watchdog_timer == 0 || --sc->npe_watchdog_timer != 0)
		return;

	device_printf(sc->sc_dev, "watchdog timeout\n");
	sc->sc_ifp->if_oerrors++;

	npeinit_locked(sc);
}
1409164426Ssam
/*
 * ioctl entry point: interface flags, multicast filter, media, and
 * (optionally) polling-capability changes; everything else falls
 * through to ether_ioctl.
 */
static int
npeioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct npe_softc *sc = ifp->if_softc;
 	struct mii_data *mii;
 	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0;
#ifdef DEVICE_POLLING
	int mask;
#endif

	switch (cmd) {
	case SIOCSIFFLAGS:
		NPE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    ifp->if_drv_flags & IFF_DRV_RUNNING) {
			/* interface taken down: stop the hardware */
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			npestop(sc);
		} else {
			/* reinitialize card on any parameter change */
			npeinit_locked(sc);
		}
		NPE_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* update multicast filter list. */
		NPE_LOCK(sc);
		npe_setmcast(sc);
		NPE_UNLOCK(sc);
		error = 0;
		break;

  	case SIOCSIFMEDIA:
  	case SIOCGIFMEDIA:
 		mii = device_get_softc(sc->sc_mii);
 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
  		break;

#ifdef DEVICE_POLLING
	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(npe_poll, ifp);
				if (error)
					return error;
				NPE_LOCK(sc);
				/* disable callbacks XXX txdone is shared */
				ixpqmgr_notify_disable(sc->rx_qid);
				ixpqmgr_notify_disable(sc->tx_doneqid);
				ifp->if_capenable |= IFCAP_POLLING;
				NPE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* NB: always enable qmgr callbacks */
				NPE_LOCK(sc);
				/* enable qmgr callbacks */
				ixpqmgr_notify_enable(sc->rx_qid,
				    IX_QMGR_Q_SOURCE_ID_NOT_E);
				ixpqmgr_notify_enable(sc->tx_doneqid,
				    IX_QMGR_Q_SOURCE_ID_NOT_E);
				ifp->if_capenable &= ~IFCAP_POLLING;
				NPE_UNLOCK(sc);
			}
		}
		break;
#endif
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}
1485164426Ssam
1486164426Ssam/*
1487164426Ssam * Setup a traffic class -> rx queue mapping.
1488164426Ssam */
static int
npe_setrxqosentry(struct npe_softc *sc, int classix, int trafclass, int qid)
{
	int npeid = npeconfig[device_get_unit(sc->sc_dev)].npeid;
	uint32_t msg[2];

	/*
	 * Message layout per NPE firmware mailbox protocol; bit positions
	 * presumably match the Intel access library encoding — confirm
	 * against the NPE firmware documentation before changing.
	 */
	msg[0] = (NPE_SETRXQOSENTRY << 24) | (npeid << 20) | classix;
	msg[1] = (trafclass << 24) | (1 << 23) | (qid << 16) | (qid << 4);
	return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
}
1499164426Ssam
1500164426Ssam/*
1501164426Ssam * Update and reset the statistics in the NPE.
1502164426Ssam */
/*
 * Ask the NPE to DMA its statistics into sc_stats and reset them.
 * NB: fire-and-forget — the ACK reply is harvested on the next
 * npe_tick pass (see the recvmsg there) so we never sleep here.
 */
static int
npe_updatestats(struct npe_softc *sc)
{
	uint32_t msg[2];

	msg[0] = NPE_RESETSTATS << NPE_MAC_MSGID_SHL;
	msg[1] = sc->sc_stats_phys;	/* physical address of stat block */
	return ixpnpe_sendmsg(sc->sc_npe, msg);		/* NB: no recv */
}
1512164426Ssam
#if 0
/* NB: unused mailbox helpers, compiled out; kept for reference. */
/*
 * Get the current statistics block.
 */
static int
npe_getstats(struct npe_softc *sc)
{
	uint32_t msg[2];

	msg[0] = NPE_GETSTATS << NPE_MAC_MSGID_SHL;
	msg[1] = sc->sc_stats_phys;	/* physical address of stat block */
	return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
}

/*
 * Query the image id of the loaded firmware.
 */
static uint32_t
npe_getimageid(struct npe_softc *sc)
{
	uint32_t msg[2];

	msg[0] = NPE_GETSTATUS << NPE_MAC_MSGID_SHL;
	msg[1] = 0;
	return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg) == 0 ? msg[1] : 0;
}

/*
 * Enable/disable loopback.
 */
static int
npe_setloopback(struct npe_softc *sc, int ena)
{
	uint32_t msg[2];

	msg[0] = (NPE_SETLOOPBACK << NPE_MAC_MSGID_SHL) | (ena != 0);
	msg[1] = 0;
	return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
}
#endif
1553164426Ssam
1554164426Ssamstatic void
1555164426Ssamnpe_child_detached(device_t dev, device_t child)
1556164426Ssam{
1557164426Ssam	struct npe_softc *sc;
1558164426Ssam
1559164426Ssam	sc = device_get_softc(dev);
1560164426Ssam	if (child == sc->sc_mii)
1561164426Ssam		sc->sc_mii = NULL;
1562164426Ssam}
1563164426Ssam
1564164426Ssam/*
1565164426Ssam * MII bus support routines.
1566164426Ssam *
1567164426Ssam * NB: ixp425 has one PHY per NPE
1568164426Ssam */
1569164426Ssamstatic uint32_t
1570164426Ssamnpe_mii_mdio_read(struct npe_softc *sc, int reg)
1571164426Ssam{
1572164426Ssam#define	MII_RD4(sc, reg)	bus_space_read_4(sc->sc_iot, sc->sc_miih, reg)
1573164426Ssam	uint32_t v;
1574164426Ssam
1575164426Ssam	/* NB: registers are known to be sequential */
1576164426Ssam	v =  (MII_RD4(sc, reg+0) & 0xff) << 0;
1577164426Ssam	v |= (MII_RD4(sc, reg+4) & 0xff) << 8;
1578164426Ssam	v |= (MII_RD4(sc, reg+8) & 0xff) << 16;
1579164426Ssam	v |= (MII_RD4(sc, reg+12) & 0xff) << 24;
1580164426Ssam	return v;
1581164426Ssam#undef MII_RD4
1582164426Ssam}
1583164426Ssam
1584164426Ssamstatic void
1585164426Ssamnpe_mii_mdio_write(struct npe_softc *sc, int reg, uint32_t cmd)
1586164426Ssam{
1587164426Ssam#define	MII_WR4(sc, reg, v) \
1588164426Ssam	bus_space_write_4(sc->sc_iot, sc->sc_miih, reg, v)
1589164426Ssam
1590164426Ssam	/* NB: registers are known to be sequential */
1591164426Ssam	MII_WR4(sc, reg+0, cmd & 0xff);
1592164426Ssam	MII_WR4(sc, reg+4, (cmd >> 8) & 0xff);
1593164426Ssam	MII_WR4(sc, reg+8, (cmd >> 16) & 0xff);
1594164426Ssam	MII_WR4(sc, reg+12, (cmd >> 24) & 0xff);
1595164426Ssam#undef MII_WR4
1596164426Ssam}
1597164426Ssam
1598164426Ssamstatic int
1599164426Ssamnpe_mii_mdio_wait(struct npe_softc *sc)
1600164426Ssam{
1601164426Ssam#define	MAXTRIES	100	/* XXX */
1602164426Ssam	uint32_t v;
1603164426Ssam	int i;
1604164426Ssam
1605164426Ssam	for (i = 0; i < MAXTRIES; i++) {
1606164426Ssam		v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_CMD);
1607164426Ssam		if ((v & NPE_MII_GO) == 0)
1608164426Ssam			return 1;
1609164426Ssam	}
1610164426Ssam	return 0;		/* NB: timeout */
1611164426Ssam#undef MAXTRIES
1612164426Ssam}
1613164426Ssam
1614164426Ssamstatic int
1615164426Ssamnpe_miibus_readreg(device_t dev, int phy, int reg)
1616164426Ssam{
1617164426Ssam	struct npe_softc *sc = device_get_softc(dev);
1618164426Ssam	uint32_t v;
1619164426Ssam
1620164426Ssam	if (phy != device_get_unit(dev))	/* XXX */
1621164426Ssam		return 0xffff;
1622164426Ssam	v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL)
1623164426Ssam	  | NPE_MII_GO;
1624164426Ssam	npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
1625164426Ssam	if (npe_mii_mdio_wait(sc))
1626164426Ssam		v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_STS);
1627164426Ssam	else
1628164426Ssam		v = 0xffff | NPE_MII_READ_FAIL;
1629164426Ssam	return (v & NPE_MII_READ_FAIL) ? 0xffff : (v & 0xffff);
1630164426Ssam#undef MAXTRIES
1631164426Ssam}
1632164426Ssam
1633164426Ssamstatic void
1634164426Ssamnpe_miibus_writereg(device_t dev, int phy, int reg, int data)
1635164426Ssam{
1636164426Ssam	struct npe_softc *sc = device_get_softc(dev);
1637164426Ssam	uint32_t v;
1638164426Ssam
1639164426Ssam	if (phy != device_get_unit(dev))	/* XXX */
1640164426Ssam		return;
1641164426Ssam	v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL)
1642164426Ssam	  | data | NPE_MII_WRITE
1643164426Ssam	  | NPE_MII_GO;
1644164426Ssam	npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
1645164426Ssam	/* XXX complain about timeout */
1646164426Ssam	(void) npe_mii_mdio_wait(sc);
1647164426Ssam}
1648164426Ssam
1649164426Ssamstatic void
1650164426Ssamnpe_miibus_statchg(device_t dev)
1651164426Ssam{
1652164426Ssam	struct npe_softc *sc = device_get_softc(dev);
1653164426Ssam	struct mii_data *mii = device_get_softc(sc->sc_mii);
1654164426Ssam	uint32_t tx1, rx1;
1655164426Ssam
1656164426Ssam	/* sync MAC duplex state */
1657164426Ssam	tx1 = RD4(sc, NPE_MAC_TX_CNTRL1);
1658164426Ssam	rx1 = RD4(sc, NPE_MAC_RX_CNTRL1);
1659164426Ssam	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
1660164426Ssam		tx1 &= ~NPE_TX_CNTRL1_DUPLEX;
1661164426Ssam		rx1 |= NPE_RX_CNTRL1_PAUSE_EN;
1662164426Ssam	} else {
1663164426Ssam		tx1 |= NPE_TX_CNTRL1_DUPLEX;
1664164426Ssam		rx1 &= ~NPE_RX_CNTRL1_PAUSE_EN;
1665164426Ssam	}
1666164426Ssam	WR4(sc, NPE_MAC_RX_CNTRL1, rx1);
1667164426Ssam	WR4(sc, NPE_MAC_TX_CNTRL1, tx1);
1668164426Ssam}
1669164426Ssam
1670164426Ssamstatic device_method_t npe_methods[] = {
1671164426Ssam	/* Device interface */
1672164426Ssam	DEVMETHOD(device_probe,		npe_probe),
1673164426Ssam	DEVMETHOD(device_attach,	npe_attach),
1674164426Ssam	DEVMETHOD(device_detach,	npe_detach),
1675164426Ssam
1676164426Ssam	/* Bus interface */
1677164426Ssam	DEVMETHOD(bus_child_detached,	npe_child_detached),
1678164426Ssam
1679164426Ssam	/* MII interface */
1680164426Ssam	DEVMETHOD(miibus_readreg,	npe_miibus_readreg),
1681164426Ssam	DEVMETHOD(miibus_writereg,	npe_miibus_writereg),
1682164426Ssam	DEVMETHOD(miibus_statchg,	npe_miibus_statchg),
1683164426Ssam
1684164426Ssam	{ 0, 0 }
1685164426Ssam};
1686164426Ssam
1687164426Ssamstatic driver_t npe_driver = {
1688164426Ssam	"npe",
1689164426Ssam	npe_methods,
1690164426Ssam	sizeof(struct npe_softc),
1691164426Ssam};
1692164426Ssam
1693164426SsamDRIVER_MODULE(npe, ixp, npe_driver, npe_devclass, 0, 0);
1694164426SsamDRIVER_MODULE(miibus, npe, miibus_driver, miibus_devclass, 0, 0);
1695164426SsamMODULE_DEPEND(npe, ixpqmgr, 1, 1, 1);
1696164426SsamMODULE_DEPEND(npe, miibus, 1, 1, 1);
1697164426SsamMODULE_DEPEND(npe, ether, 1, 1, 1);
1698