/*-
 * Copyright (c) 2006-2008 Sam Leffler.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/xscale/ixp425/if_npe.c 177505 2008-03-22 16:53:28Z sam $");

/*
 * Intel XScale NPE Ethernet driver.
 *
 * This driver handles the two ports present on the IXP425.
 * Packet processing is done by the Network Processing Engines
 * (NPE's) that work together with a MAC and PHY. The MAC
 * is also mapped to the XScale cpu; the PHY is accessed via
 * the MAC. NPE-XScale communication happens through h/w
 * queues managed by the Q Manager block.
 *
 * The code here replaces the ethAcc, ethMii, and ethDB classes
 * in the Intel Access Library (IAL) and the OS-specific driver.
 *
 * XXX add vlan support
 */
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <machine/bus.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_mib.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <arm/xscale/ixp425/ixp425reg.h>
#include <arm/xscale/ixp425/ixp425var.h>
#include <arm/xscale/ixp425/ixp425_qmgr.h>
#include <arm/xscale/ixp425/ixp425_npevar.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <arm/xscale/ixp425/if_npereg.h>

#include "miibus_if.h"

/*
 * XXX: For the main bus dma tag. Can go away if the new method to get the
 * dma tag from the parent got MFC'd into RELENG_6.
 */
extern struct ixp425_softc *ixp425_softc;

struct npebuf {
	struct npebuf	*ix_next;	/* chain to next buffer */
	void		*ix_m;		/* backpointer to mbuf */
	bus_dmamap_t	ix_map;		/* bus dma map for associated data */
	struct npehwbuf	*ix_hw;		/* associated h/w block */
	uint32_t	ix_neaddr;	/* phys address of ix_hw */
};

struct npedma {
	const char*	name;
	int		nbuf;		/* # npebuf's allocated */
	bus_dma_tag_t	mtag;		/* bus dma tag for mbuf data */
	struct npehwbuf	*hwbuf;		/* NPE h/w buffers */
	bus_dma_tag_t	buf_tag;	/* tag+map for NPE buffers */
	bus_dmamap_t	buf_map;
	bus_addr_t	buf_phys;	/* phys addr of buffers */
	struct npebuf	*buf;		/* s/w buffers (1-1 w/ h/w) */
};

struct npe_softc {
	/* XXX mii requires this be first; do not move! */
	struct ifnet	*sc_ifp;	/* ifnet pointer */
	struct mtx	sc_mtx;		/* basically a perimeter lock */
	device_t	sc_dev;
	bus_space_tag_t	sc_iot;
	bus_space_handle_t sc_ioh;	/* MAC register window */
	device_t	sc_mii;		/* child miibus */
	bus_space_handle_t sc_miih;	/* MII register window */
	struct ixpnpe_softc *sc_npe;	/* NPE support */
	int		sc_debug;	/* DPRINTF* control */
	int		sc_tickinterval;
	struct callout	tick_ch;	/* Tick callout */
	int		npe_watchdog_timer;
	struct npedma	txdma;
	struct npebuf	*tx_free;	/* list of free tx buffers */
	struct npedma	rxdma;
	bus_addr_t	buf_phys;	/* XXX for returning a value */
	int		rx_qid;		/* rx qid */
	int		rx_freeqid;	/* rx free buffers qid */
	int		tx_qid;		/* tx qid */
	int		tx_doneqid;	/* tx completed qid */
	int		sc_phy;		/* PHY id */
	struct ifmib_iso_8802_3 mibdata;
	bus_dma_tag_t	sc_stats_tag;	/* bus dma tag for stats block */
	struct npestats	*sc_stats;
	bus_dmamap_t	sc_stats_map;
	bus_addr_t	sc_stats_phys;	/* phys addr of sc_stats */
};

/*
 * Per-unit static configuration for IXP425.  The tx and
 * rx free Q id's are fixed by the NPE microcode.  The
 * rx Q id's are programmed to be separate to simplify
 * multi-port processing.  It may be better to handle
 * all traffic through one Q (as done by the Intel drivers).
 *
 * Note that the PHY's are accessible only from MAC A
 * on the IXP425.  This and other platform-specific
 * assumptions probably need to be handled through hints.
 */
static const struct {
	const char	*desc;		/* device description */
	int		npeid;		/* NPE assignment */
	uint32_t	imageid;	/* NPE firmware image id */
	uint32_t	regbase;
	int		regsize;
	uint32_t	miibase;
	int		miisize;
	int		phy;		/* phy id */
	uint8_t		rx_qid;
	uint8_t		rx_freeqid;
	uint8_t		tx_qid;
	uint8_t		tx_doneqid;
} npeconfig[NPE_PORTS_MAX] = {
	{ .desc		= "IXP NPE-B",
	  .npeid	= NPE_B,
	  .imageid	= IXP425_NPE_B_IMAGEID,
	  .regbase	= IXP425_MAC_A_HWBASE,
	  .regsize	= IXP425_MAC_A_SIZE,
	  .miibase	= IXP425_MAC_A_HWBASE,
	  .miisize	= IXP425_MAC_A_SIZE,
	  .phy		= 0,
	  .rx_qid	= 4,
	  .rx_freeqid	= 27,
	  .tx_qid	= 24,
	  .tx_doneqid	= 31
	},
	{ .desc		= "IXP NPE-C",
	  .npeid	= NPE_C,
	  .imageid	= IXP425_NPE_C_IMAGEID,
	  .regbase	= IXP425_MAC_B_HWBASE,
	  .regsize	= IXP425_MAC_B_SIZE,
	  .miibase	= IXP425_MAC_A_HWBASE,
	  .miisize	= IXP425_MAC_A_SIZE,
	  .phy		= 1,
	  .rx_qid	= 12,
	  .rx_freeqid	= 28,
	  .tx_qid	= 25,
	  .tx_doneqid	= 31
	},
};
static struct npe_softc *npes[NPE_MAX];	/* NB: indexed by npeid */

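/*
 * MAC register window accessors; the window is mapped with
 * bus_space_map in npe_activate.
 */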
static __inline uint32_t
RD4(struct npe_softc *sc, bus_size_t off)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
}

static __inline void
WR4(struct npe_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
}

#define NPE_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define	NPE_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define NPE_LOCK_INIT(_sc) \
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->sc_dev), \
	    MTX_NETWORK_LOCK, MTX_DEF)
#define NPE_LOCK_DESTROY(_sc)	mtx_destroy(&_sc->sc_mtx);
#define NPE_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define NPE_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);

static devclass_t npe_devclass;

static int	npe_activate(device_t dev);
static void	npe_deactivate(device_t dev);
static int	npe_ifmedia_update(struct ifnet *ifp);
static void	npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static void	npe_setmac(struct npe_softc *sc, u_char *eaddr);
static void	npe_getmac(struct npe_softc *sc, u_char *eaddr);
static void	npe_txdone(int qid, void *arg);
static int	npe_rxbuf_init(struct npe_softc *, struct npebuf *,
			struct mbuf *);
static void	npe_rxdone(int qid, void *arg);
static void	npeinit(void *);
static void	npestart_locked(struct ifnet *);
static void	npestart(struct ifnet *);
static void	npestop(struct npe_softc *);
static void	npewatchdog(struct npe_softc *);
static int	npeioctl(struct ifnet * ifp, u_long, caddr_t);

static int	npe_setrxqosentry(struct npe_softc *, int classix,
			int trafclass, int qid);
static int	npe_updatestats(struct npe_softc *);
#if 0
static int	npe_getstats(struct npe_softc *);
static uint32_t	npe_getimageid(struct npe_softc *);
static int	npe_setloopback(struct npe_softc *, int ena);
#endif

/* NB: all tx done processing goes through one queue */
static int tx_doneqid = -1;

SYSCTL_NODE(_hw, OID_AUTO, npe, CTLFLAG_RD, 0, "IXP425 NPE driver parameters");

static int npe_debug = 0;
SYSCTL_INT(_hw_npe, OID_AUTO, debug, CTLFLAG_RW, &npe_debug,
	   0, "IXP425 NPE network interface debug msgs");
TUNABLE_INT("hw.npe.debug", &npe_debug);
#define	DPRINTF(sc, fmt, ...) do {					\
	if (sc->sc_debug) device_printf(sc->sc_dev, fmt, __VA_ARGS__);	\
} while (0)
#define	DPRINTFn(n, sc, fmt, ...) do {					\
	if (sc->sc_debug >= n) device_printf(sc->sc_dev, fmt, __VA_ARGS__);\
} while (0)
static int npe_tickinterval = 3;		/* npe_tick frequency (secs) */
SYSCTL_INT(_hw_npe, OID_AUTO, tickinterval, CTLFLAG_RD, &npe_tickinterval,
	    0, "periodic work interval (secs)");
TUNABLE_INT("hw.npe.tickinterval", &npe_tickinterval);

static	int npe_rxbuf = 64;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_npe, OID_AUTO, rxbuf, CTLFLAG_RD, &npe_rxbuf,
	    0, "rx buffers allocated");
TUNABLE_INT("hw.npe.rxbuf", &npe_rxbuf);
static	int npe_txbuf = 128;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_npe, OID_AUTO, txbuf, CTLFLAG_RD, &npe_txbuf,
	    0, "tx buffers allocated");
TUNABLE_INT("hw.npe.txbuf", &npe_txbuf);

static int
npe_probe(device_t dev)
{
	int unit = device_get_unit(dev);

	if (unit >= NPE_PORTS_MAX) {
		device_printf(dev, "unit %d not supported\n", unit);
		return EINVAL;
	}
	/* XXX check feature register to see if enabled */
	device_set_desc(dev, npeconfig[unit].desc);
	return 0;
}

static int
npe_attach(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	struct ixp425_softc *sa = device_get_softc(device_get_parent(dev));
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct ifnet *ifp = NULL;
	int error;
	u_char eaddr[6];

	sc->sc_dev = dev;
	sc->sc_iot = sa->sc_iot;
	NPE_LOCK_INIT(sc);
	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
	sc->sc_debug = npe_debug;
	sc->sc_tickinterval = npe_tickinterval;

	sc->sc_npe = ixpnpe_attach(dev);
	if (sc->sc_npe == NULL) {
		error = EIO;		/* XXX */
		goto out;
	}

	error = npe_activate(dev);
	if (error)
		goto out;

	npe_getmac(sc, eaddr);

	/* NB: must be setup prior to invoking mii code */
	sc->sc_ifp = ifp = if_alloc(IFT_ETHER);
	if (mii_phy_probe(dev, &sc->sc_mii, npe_ifmedia_update,
	    npe_ifmedia_status)) {
		device_printf(dev, "Cannot find my PHY.\n");
		error = ENXIO;
		goto out;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = npestart;
	ifp->if_ioctl = npeioctl;
	ifp->if_init = npeinit;
	IFQ_SET_MAXLEN(&ifp->if_snd, sc->txdma.nbuf - 1);
	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_linkmib = &sc->mibdata;
	ifp->if_linkmiblen = sizeof(sc->mibdata);
	sc->mibdata.dot3Compliance = DOT3COMPLIANCE_STATS;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tickinterval",
	    CTLFLAG_RW, &sc->sc_tickinterval, 0, "periodic work frequency");

	ether_ifattach(ifp, eaddr);
	return 0;
out:
	npe_deactivate(dev);
	if (ifp != NULL)
		if_free(ifp);
	return error;
}

static int
npe_detach(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = sc->sc_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	npestop(sc);
	if (ifp != NULL) {
		ether_ifdetach(ifp);
		if_free(ifp);
	}
	NPE_LOCK_DESTROY(sc);
	npe_deactivate(dev);
	if (sc->sc_npe != NULL)
		ixpnpe_detach(sc->sc_npe);
	return 0;
}

/*
 * Compute and install the multicast filter.
 */
static void
npe_setmcast(struct npe_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	uint8_t mask[ETHER_ADDR_LEN], addr[ETHER_ADDR_LEN];
	int i;

	if (ifp->if_flags & IFF_PROMISC) {
		memset(mask, 0, ETHER_ADDR_LEN);
		memset(addr, 0, ETHER_ADDR_LEN);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		static const uint8_t allmulti[ETHER_ADDR_LEN] =
		    { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
		memcpy(mask, allmulti, ETHER_ADDR_LEN);
		memcpy(addr, allmulti, ETHER_ADDR_LEN);
	} else {
		uint8_t clr[ETHER_ADDR_LEN], set[ETHER_ADDR_LEN];
		struct ifmultiaddr *ifma;
		const uint8_t *mac;

		memset(clr, 0, ETHER_ADDR_LEN);
		memset(set, 0xff, ETHER_ADDR_LEN);

		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			mac = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				clr[i] |= mac[i];
				set[i] &= mac[i];
			}
		}
		IF_ADDR_UNLOCK(ifp);

		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			mask[i] = set[i] | ~clr[i];
			addr[i] = set[i];
		}
	}

	/*
	 * Write the mask and address registers.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		WR4(sc, NPE_MAC_ADDR_MASK(i), mask[i]);
		WR4(sc, NPE_MAC_ADDR(i), addr[i]);
	}
}

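/* Callback for bus_dmamap_load; stashes the (single) segment address. */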
static void
npe_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct npe_softc *sc;

	if (error != 0)
		return;
	sc = (struct npe_softc *)arg;
	sc->buf_phys = segs[0].ds_addr;
}

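/*
 * Allocate the resources for a dma region: a per-mbuf tag+map
 * for packet data plus one coherent block of npehwbuf descriptors
 * that is shared with the NPE, shadowed 1-1 by the npebuf array.
 */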
static int
npe_dma_setup(struct npe_softc *sc, struct npedma *dma,
	const char *name, int nbuf, int maxseg)
{
	int error, i;

	memset(dma, 0, sizeof(*dma));

	dma->name = name;
	dma->nbuf = nbuf;

	/* DMA tag for mapped mbufs  */
	error = bus_dma_tag_create(ixp425_softc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES, maxseg, MCLBYTES, 0,
	    busdma_lock_mutex, &sc->sc_mtx, &dma->mtag);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to create %s mbuf dma tag, "
		     "error %u\n", dma->name, error);
		return error;
	}

	/* DMA tag and map for the NPE buffers */
	error = bus_dma_tag_create(ixp425_softc->sc_dmat, sizeof(uint32_t), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    nbuf * sizeof(struct npehwbuf), 1,
	    nbuf * sizeof(struct npehwbuf), 0,
	    busdma_lock_mutex, &sc->sc_mtx, &dma->buf_tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to create %s npebuf dma tag, error %u\n",
		    dma->name, error);
		return error;
	}
	/* XXX COHERENT for now */
	error = bus_dmamem_alloc(dma->buf_tag, (void **)&dma->hwbuf,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &dma->buf_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		     "unable to allocate memory for %s h/w buffers, error %u\n",
		     dma->name, error);
		return error;
	}
	/* XXX M_TEMP */
	dma->buf = malloc(nbuf * sizeof(struct npebuf), M_TEMP,
	    M_NOWAIT | M_ZERO);
	if (dma->buf == NULL) {
		device_printf(sc->sc_dev,
		     "unable to allocate memory for %s s/w buffers\n",
		     dma->name);
		return ENOMEM;
	}
	error = bus_dmamap_load(dma->buf_tag, dma->buf_map,
	    dma->hwbuf, nbuf*sizeof(struct npehwbuf), npe_getaddr, sc, 0);
	if (error != 0) {
		device_printf(sc->sc_dev,
		     "unable to map memory for %s h/w buffers, error %u\n",
		     dma->name, error);
		return error;
	}
	dma->buf_phys = sc->buf_phys;
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];
		struct npehwbuf *hw = &dma->hwbuf[i];

		/* calculate offset to shared area */
		npe->ix_neaddr = dma->buf_phys +
			((uintptr_t)hw - (uintptr_t)dma->hwbuf);
		KASSERT((npe->ix_neaddr & 0x1f) == 0,
		    ("ixpbuf misaligned, PA 0x%x", npe->ix_neaddr));
		error = bus_dmamap_create(dma->mtag, BUS_DMA_NOWAIT,
				&npe->ix_map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			     "unable to create dmamap for %s buffer %u, "
			     "error %u\n", dma->name, i, error);
			return error;
		}
		npe->ix_hw = hw;
	}
	bus_dmamap_sync(dma->buf_tag, dma->buf_map, BUS_DMASYNC_PREWRITE);
	return 0;
}

static void
npe_dma_destroy(struct npe_softc *sc, struct npedma *dma)
{
	int i;

	if (dma->hwbuf != NULL) {
		for (i = 0; i < dma->nbuf; i++) {
			struct npebuf *npe = &dma->buf[i];
			bus_dmamap_destroy(dma->mtag, npe->ix_map);
		}
		bus_dmamap_unload(dma->buf_tag, dma->buf_map);
		bus_dmamem_free(dma->buf_tag, dma->hwbuf, dma->buf_map);
		bus_dmamap_destroy(dma->buf_tag, dma->buf_map);
	}
	if (dma->buf != NULL)
		free(dma->buf, M_TEMP);
	if (dma->buf_tag)
		bus_dma_tag_destroy(dma->buf_tag);
	if (dma->mtag)
		bus_dma_tag_destroy(dma->mtag);
	memset(dma, 0, sizeof(*dma));
}

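/*
 * Allow the MAC/MII register windows and the PHY number to be
 * overridden via hints, e.g. hint.npe.0.mac="A" selects the MAC A
 * register window for unit 0.
 */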
static int
override_addr(device_t dev, const char *resname, int *base, int *size)
{
	int unit = device_get_unit(dev);
	const char *resval;

	/* XXX warn for wrong hint type */
	if (resource_string_value("npe", unit, resname, &resval) != 0)
		return 0;
	switch (resval[0]) {
	case 'A':
		*base = IXP425_MAC_A_HWBASE;
		*size = IXP425_MAC_A_SIZE;
		break;
	case 'B':
		*base = IXP425_MAC_B_HWBASE;
		*size = IXP425_MAC_B_SIZE;
		break;
	default:
		device_printf(dev, "Warning, bad value %s for "
		    "npe.%d.%s ignored\n", resval, unit, resname);
		return 0;
	}
	if (bootverbose)
		device_printf(dev, "using npe.%d.%s=%s override\n",
		    unit, resname, resval);
	return 1;
}

static int
override_unit(device_t dev, const char *resname, int *val, int min, int max)
{
	int unit = device_get_unit(dev);
	int resval;

	if (resource_int_value("npe", unit, resname, &resval) != 0)
		return 0;
	if (!(min <= resval && resval <= max)) {
		device_printf(dev, "Warning, bad value %d for npe.%d.%s "
		    "ignored (value must be [%d-%d])\n", resval, unit,
		    resname, min, max);
		return 0;
	}
	if (bootverbose)
		device_printf(dev, "using npe.%d.%s=%d override\n",
		    unit, resname, resval);
	*val = resval;
	return 1;
}

static int
npe_activate(device_t dev)
{
	struct npe_softc * sc = device_get_softc(dev);
	int unit = device_get_unit(dev);
	int error, i, regbase, regsize, miibase, miisize;
	uint32_t imageid;

	/*
	 * Load NPE firmware and start it running.  We assume
	 * that minor version bumps remain compatible so probe
	 * the firmware image starting with the expected version
	 * and then bump the minor version up to the max.
	 */
	imageid = npeconfig[unit].imageid;
	for (;;) {
		error = ixpnpe_init(sc->sc_npe, "npe_fw", imageid);
		if (error == 0)
			break;
		/* ESRCH is returned when the requested image is not present */
		if (error != ESRCH)
			return error;
		/* bump the minor version up to the max possible */
		if (NPEIMAGE_MINOR(imageid) == 0xff)
			return error;
		imageid++;
	}

	if (!override_addr(dev, "mac", &regbase, &regsize)) {
		regbase = npeconfig[unit].regbase;
		regsize = npeconfig[unit].regsize;
	}
	if (bus_space_map(sc->sc_iot, regbase, regsize, 0, &sc->sc_ioh)) {
		device_printf(dev, "Cannot map registers 0x%x:0x%x\n",
		    regbase, regsize);
		return ENOMEM;
	}

	if (!override_addr(dev, "mii", &miibase, &miisize)) {
		miibase = npeconfig[unit].miibase;
		miisize = npeconfig[unit].miisize;
	}
	if (miibase != regbase) {
		/*
		 * PHY is mapped through a different MAC, setup an
		 * additional mapping for frobbing the PHY registers.
		 */
		if (bus_space_map(sc->sc_iot, miibase, miisize, 0,
		    &sc->sc_miih)) {
			device_printf(dev,
			    "Cannot map MII registers 0x%x:0x%x\n",
			    miibase, miisize);
			return ENOMEM;
		}
	} else
		sc->sc_miih = sc->sc_ioh;
	error = npe_dma_setup(sc, &sc->txdma, "tx", npe_txbuf, NPE_MAXSEG);
	if (error != 0)
		return error;
	error = npe_dma_setup(sc, &sc->rxdma, "rx", npe_rxbuf, 1);
	if (error != 0)
		return error;

	/* setup statistics block */
	error = bus_dma_tag_create(ixp425_softc->sc_dmat, sizeof(uint32_t), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct npestats), 1, sizeof(struct npestats), 0,
	    busdma_lock_mutex, &sc->sc_mtx, &sc->sc_stats_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to create stats tag, "
		     "error %u\n", error);
		return error;
	}
	error = bus_dmamem_alloc(sc->sc_stats_tag, (void **)&sc->sc_stats,
	    BUS_DMA_NOWAIT, &sc->sc_stats_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		     "unable to allocate memory for stats block, error %u\n",
		     error);
		return error;
	}
	error = bus_dmamap_load(sc->sc_stats_tag, sc->sc_stats_map,
	    sc->sc_stats, sizeof(struct npestats), npe_getaddr, sc, 0);
	if (error != 0) {
		device_printf(sc->sc_dev,
		     "unable to load memory for stats block, error %u\n",
		     error);
		return error;
	}
	sc->sc_stats_phys = sc->buf_phys;

	/* XXX disable half-bridge LEARNING+FILTERING feature */

	/*
	 * Setup h/w rx/tx queues.  There are four q's:
	 *   rx		inbound q of rx'd frames
	 *   rx_free	pool of ixpbuf's for receiving frames
	 *   tx		outbound q of frames to send
	 *   tx_done	q of tx frames that have been processed
	 *
	 * The NPE handles the actual tx/rx process and the q manager
	 * handles the queues.  The driver just writes entries to the
	 * q manager mailbox's and gets callbacks when there are rx'd
	 * frames to process or tx'd frames to reap.  These callbacks
	 * are controlled by the q configurations; e.g. we get a
	 * callback when tx_done has 2 or more frames to process and
	 * when the rx q has at least one frame.  These settings can
	 * be changed at the time the q is configured.
	 */
	sc->rx_qid = npeconfig[unit].rx_qid;
	ixpqmgr_qconfig(sc->rx_qid, npe_rxbuf, 0,  1,
		IX_QMGR_Q_SOURCE_ID_NOT_E, npe_rxdone, sc);
	sc->rx_freeqid = npeconfig[unit].rx_freeqid;
	ixpqmgr_qconfig(sc->rx_freeqid,	npe_rxbuf, 0, npe_rxbuf/2, 0, NULL, sc);
	/* tell the NPE to direct all traffic to rx_qid */
#if 0
	for (i = 0; i < 8; i++)
#else
device_printf(sc->sc_dev, "remember to fix rx q setup\n");
	for (i = 0; i < 4; i++)
#endif
		npe_setrxqosentry(sc, i, 0, sc->rx_qid);

	sc->tx_qid = npeconfig[unit].tx_qid;
	sc->tx_doneqid = npeconfig[unit].tx_doneqid;
	ixpqmgr_qconfig(sc->tx_qid, npe_txbuf, 0, npe_txbuf, 0, NULL, sc);
	if (tx_doneqid == -1) {
		ixpqmgr_qconfig(sc->tx_doneqid,	npe_txbuf, 0,  2,
			IX_QMGR_Q_SOURCE_ID_NOT_E, npe_txdone, sc);
		tx_doneqid = sc->tx_doneqid;
	}

	/*
	 * Setup phy port number.  We allow override via hints
	 * to handle different board configs.
	 */
	if (!override_unit(dev, "phy", &sc->sc_phy, 0, MII_NPHY-1))
		sc->sc_phy = npeconfig[unit].phy;

	KASSERT(npes[npeconfig[unit].npeid] == NULL,
	    ("npe %u already setup", npeconfig[unit].npeid));
	npes[npeconfig[unit].npeid] = sc;

	return 0;
}

static void
npe_deactivate(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	int unit = device_get_unit(dev);

	npes[npeconfig[unit].npeid] = NULL;

	/* XXX disable q's */
	if (sc->sc_npe != NULL)
		ixpnpe_stop(sc->sc_npe);
	if (sc->sc_stats != NULL) {
		bus_dmamap_unload(sc->sc_stats_tag, sc->sc_stats_map);
		bus_dmamem_free(sc->sc_stats_tag, sc->sc_stats,
			sc->sc_stats_map);
		bus_dmamap_destroy(sc->sc_stats_tag, sc->sc_stats_map);
	}
	if (sc->sc_stats_tag != NULL)
		bus_dma_tag_destroy(sc->sc_stats_tag);
	npe_dma_destroy(sc, &sc->txdma);
	npe_dma_destroy(sc, &sc->rxdma);
	bus_generic_detach(sc->sc_dev);
	if (sc->sc_mii)
		device_delete_child(sc->sc_dev, sc->sc_mii);
#if 0
	/* XXX sc_ioh and sc_miih */
	if (sc->mem_res)
		bus_release_resource(dev, SYS_RES_IOPORT,
		    rman_get_rid(sc->mem_res), sc->mem_res);
	sc->mem_res = 0;
#endif
}

/*
 * Change media according to request.
 */
static int
npe_ifmedia_update(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->sc_mii);
	NPE_LOCK(sc);
	mii_mediachg(mii);
	/* XXX push state ourself? */
	NPE_UNLOCK(sc);
	return (0);
}

/*
 * Notify the world which media we're using.
 */
static void
npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct npe_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->sc_mii);
	NPE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	NPE_UNLOCK(sc);
}

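/*
 * Fold the NPE-maintained statistics block (fetched by npe_tick)
 * into the 802.3 MIB and ifnet counters; NB: the NPE counters are
 * big-endian.
 */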
static void
npe_addstats(struct npe_softc *sc)
{
#define	MIBADD(x)	sc->mibdata.x += be32toh(ns->x)
	struct ifnet *ifp = sc->sc_ifp;
	struct npestats *ns = sc->sc_stats;

	MIBADD(dot3StatsAlignmentErrors);
	MIBADD(dot3StatsFCSErrors);
	MIBADD(dot3StatsSingleCollisionFrames);
	MIBADD(dot3StatsMultipleCollisionFrames);
	MIBADD(dot3StatsDeferredTransmissions);
	MIBADD(dot3StatsLateCollisions);
	MIBADD(dot3StatsExcessiveCollisions);
	MIBADD(dot3StatsInternalMacTransmitErrors);
	MIBADD(dot3StatsCarrierSenseErrors);
	sc->mibdata.dot3StatsFrameTooLongs +=
	      be32toh(ns->RxLargeFramesDiscards)
	    + be32toh(ns->TxLargeFrameDiscards);
	MIBADD(dot3StatsInternalMacReceiveErrors);
	sc->mibdata.dot3StatsMissedFrames +=
	      be32toh(ns->RxOverrunDiscards)
	    + be32toh(ns->RxUnderflowEntryDiscards);

	ifp->if_oerrors +=
		  be32toh(ns->dot3StatsInternalMacTransmitErrors)
		+ be32toh(ns->dot3StatsCarrierSenseErrors)
		+ be32toh(ns->TxVLANIdFilterDiscards)
		;
	ifp->if_ierrors += be32toh(ns->dot3StatsFCSErrors)
		+ be32toh(ns->dot3StatsInternalMacReceiveErrors)
		+ be32toh(ns->RxOverrunDiscards)
		+ be32toh(ns->RxUnderflowEntryDiscards)
		;
	ifp->if_collisions +=
		  be32toh(ns->dot3StatsSingleCollisionFrames)
		+ be32toh(ns->dot3StatsMultipleCollisionFrames)
		;
#undef MIBADD
}

static void
npe_tick(void *xsc)
{
#define	ACK	(NPE_RESETSTATS << NPE_MAC_MSGID_SHL)
	struct npe_softc *sc = xsc;
	struct mii_data *mii = device_get_softc(sc->sc_mii);
	uint32_t msg[2];

	NPE_ASSERT_LOCKED(sc);

	/*
	 * NB: to avoid sleeping with the softc lock held we
	 * split the NPE msg processing into two parts.  The
	 * request for statistics is sent w/o waiting for a
	 * reply and then on the next tick we retrieve the
	 * results.  This works because npe_tick is the only
	 * code that talks via the mailbox's (except at setup).
	 * This likely can be handled better.
	 */
	if (ixpnpe_recvmsg(sc->sc_npe, msg) == 0 && msg[0] == ACK) {
		bus_dmamap_sync(sc->sc_stats_tag, sc->sc_stats_map,
		    BUS_DMASYNC_POSTREAD);
		npe_addstats(sc);
	}
	npe_updatestats(sc);
	mii_tick(mii);

	npewatchdog(sc);

	/* schedule next poll */
	callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc);
#undef ACK
}

static void
npe_setmac(struct npe_softc *sc, u_char *eaddr)
{
	WR4(sc, NPE_MAC_UNI_ADDR_1, eaddr[0]);
	WR4(sc, NPE_MAC_UNI_ADDR_2, eaddr[1]);
	WR4(sc, NPE_MAC_UNI_ADDR_3, eaddr[2]);
	WR4(sc, NPE_MAC_UNI_ADDR_4, eaddr[3]);
	WR4(sc, NPE_MAC_UNI_ADDR_5, eaddr[4]);
	WR4(sc, NPE_MAC_UNI_ADDR_6, eaddr[5]);
}

static void
npe_getmac(struct npe_softc *sc, u_char *eaddr)
{
	/* NB: the unicast address appears to be loaded from EEPROM on reset */
	eaddr[0] = RD4(sc, NPE_MAC_UNI_ADDR_1) & 0xff;
	eaddr[1] = RD4(sc, NPE_MAC_UNI_ADDR_2) & 0xff;
	eaddr[2] = RD4(sc, NPE_MAC_UNI_ADDR_3) & 0xff;
	eaddr[3] = RD4(sc, NPE_MAC_UNI_ADDR_4) & 0xff;
	eaddr[4] = RD4(sc, NPE_MAC_UNI_ADDR_5) & 0xff;
	eaddr[5] = RD4(sc, NPE_MAC_UNI_ADDR_6) & 0xff;
}

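/*
 * Scratch state used by npe_txdone to accumulate, per NPE, the
 * list of completed tx buffers so each port's free list can be
 * updated (and output restarted) with one lock acquisition.
 */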
struct txdone {
	struct npebuf *head;
	struct npebuf **tail;
	int count;
};

static __inline void
npe_txdone_finish(struct npe_softc *sc, const struct txdone *td)
{
	struct ifnet *ifp = sc->sc_ifp;

	NPE_LOCK(sc);
	*td->tail = sc->tx_free;
	sc->tx_free = td->head;
	/*
	 * We're no longer busy, so clear the busy flag and call the
	 * start routine to xmit more packets.
	 */
	ifp->if_opackets += td->count;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->npe_watchdog_timer = 0;
	npestart_locked(ifp);
	NPE_UNLOCK(sc);
}

/*
 * Q manager callback on tx done queue.  Reap mbufs
 * and return tx buffers to the free list.  Finally
 * restart output.  Note the microcode has only one
 * txdone q wired into it so we must use the NPE ID
 * returned with each npehwbuf to decide where to
 * send buffers.
 */
static void
npe_txdone(int qid, void *arg)
{
#define	P2V(a, dma) \
	&(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
	struct npe_softc *sc0 = arg;
	struct npe_softc *sc;
	struct npebuf *npe;
	struct txdone *td, q[NPE_MAX];
	uint32_t entry;

	/* XXX no NPE-A support */
	q[NPE_B].tail = &q[NPE_B].head; q[NPE_B].count = 0;
	q[NPE_C].tail = &q[NPE_C].head; q[NPE_C].count = 0;
	/* XXX max # at a time? */
	while (ixpqmgr_qread(qid, &entry) == 0) {
		DPRINTF(sc0, "%s: entry 0x%x NPE %u port %u\n",
		    __func__, entry, NPE_QM_Q_NPE(entry), NPE_QM_Q_PORT(entry));

		sc = npes[NPE_QM_Q_NPE(entry)];
		npe = P2V(NPE_QM_Q_ADDR(entry), &sc->txdma);
		m_freem(npe->ix_m);
		npe->ix_m = NULL;

		td = &q[NPE_QM_Q_NPE(entry)];
		*td->tail = npe;
		td->tail = &npe->ix_next;
		td->count++;
	}

	if (q[NPE_B].count)
		npe_txdone_finish(npes[NPE_B], &q[NPE_B]);
	if (q[NPE_C].count)
		npe_txdone_finish(npes[NPE_C], &q[NPE_C]);
#undef P2V
}

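/*
 * (Re)initialize an rx buffer: attach a fresh mbuf (allocating one
 * if needed) and fill in the h/w descriptor.  The buffer length is
 * stored in the upper 16 bits of ix_ne[0].len; on receive the NPE
 * writes the frame length into the lower 16 bits (see npe_rxdone).
 */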
static int
npe_rxbuf_init(struct npe_softc *sc, struct npebuf *npe, struct mbuf *m)
{
	bus_dma_segment_t segs[1];
	struct npedma *dma = &sc->rxdma;
	struct npehwbuf *hw;
	int error, nseg;

	if (m == NULL) {
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			return ENOBUFS;
	}
	KASSERT(m->m_ext.ext_size >= 1536 + ETHER_ALIGN,
		("ext_size %d", m->m_ext.ext_size));
	m->m_pkthdr.len = m->m_len = 1536;
	/* backload payload and align ip hdr */
	m->m_data = m->m_ext.ext_buf + (m->m_ext.ext_size - (1536+ETHER_ALIGN));
	error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map, m,
			segs, &nseg, 0);
	if (error != 0) {
		m_freem(m);
		return error;
	}
	hw = npe->ix_hw;
	hw->ix_ne[0].data = htobe32(segs[0].ds_addr);
	/* NB: NPE requires length be a multiple of 64 */
	/* NB: buffer length is shifted in word */
	hw->ix_ne[0].len = htobe32(segs[0].ds_len << 16);
	hw->ix_ne[0].next = 0;
	npe->ix_m = m;
	/* Flush the memory in the mbuf */
	bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREREAD);
	return 0;
}

/*
 * RX q processing for a specific NPE.  Claim entries
 * from the hardware queue and pass the frames up the
 * stack. Pass the rx buffers to the free list.
 */
static void
npe_rxdone(int qid, void *arg)
{
#define	P2V(a, dma) \
	&(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
	struct npe_softc *sc = arg;
	struct npedma *dma = &sc->rxdma;
	uint32_t entry;

	while (ixpqmgr_qread(qid, &entry) == 0) {
		struct npebuf *npe = P2V(NPE_QM_Q_ADDR(entry), dma);
		struct mbuf *m;

		DPRINTF(sc, "%s: entry 0x%x neaddr 0x%x ne_len 0x%x\n",
		    __func__, entry, npe->ix_neaddr, npe->ix_hw->ix_ne[0].len);
		/*
		 * Allocate a new mbuf to replenish the rx buffer.
		 * If doing so fails we drop the rx'd frame so we
		 * can reuse the previous mbuf.  When we're able to
		 * allocate a new mbuf dispatch the mbuf w/ rx'd
		 * data up the stack and replace it with the newly
		 * allocated one.
		 */
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m != NULL) {
			struct mbuf *mrx = npe->ix_m;
			struct npehwbuf *hw = npe->ix_hw;
			struct ifnet *ifp = sc->sc_ifp;

			/* Flush mbuf memory for rx'd data */
			bus_dmamap_sync(dma->mtag, npe->ix_map,
			    BUS_DMASYNC_POSTREAD);

			/* XXX flush hw buffer; works now 'cuz coherent */
			/* set m_len etc. per rx frame size */
			mrx->m_len = be32toh(hw->ix_ne[0].len) & 0xffff;
			mrx->m_pkthdr.len = mrx->m_len;
			mrx->m_pkthdr.rcvif = ifp;
			mrx->m_flags |= M_HASFCS;

			ifp->if_ipackets++;
			ifp->if_input(ifp, mrx);
		} else {
			/* discard frame and re-use mbuf */
			m = npe->ix_m;
		}
		if (npe_rxbuf_init(sc, npe, m) == 0) {
			/* return npe buf to rx free list */
			ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
		} else {
			/* XXX should not happen */
		}
	}
#undef P2V
}

#ifdef DEVICE_POLLING
static void
npe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct npe_softc *sc = ifp->if_softc;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		npe_rxdone(sc->rx_qid, sc);
		npe_txdone(sc->tx_doneqid, sc);	/* XXX polls both NPE's */
	}
}
#endif /* DEVICE_POLLING */

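/*
 * Reclaim any mbufs left on tx buffers and (re)construct the list
 * of free tx buffers; called when the interface is (re)initialized.
 */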
static void
npe_startxmit(struct npe_softc *sc)
{
	struct npedma *dma = &sc->txdma;
	int i;

	NPE_ASSERT_LOCKED(sc);
	sc->tx_free = NULL;
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];
		if (npe->ix_m != NULL) {
			/* NB: should not happen */
			device_printf(sc->sc_dev,
			    "%s: free mbuf at entry %u\n", __func__, i);
			m_freem(npe->ix_m);
		}
		npe->ix_m = NULL;
		npe->ix_next = sc->tx_free;
		sc->tx_free = npe;
	}
}

static void
npe_startrecv(struct npe_softc *sc)
{
	struct npedma *dma = &sc->rxdma;
	struct npebuf *npe;
	int i;

	NPE_ASSERT_LOCKED(sc);
	for (i = 0; i < dma->nbuf; i++) {
		npe = &dma->buf[i];
		npe_rxbuf_init(sc, npe, npe->ix_m);
		/* set npe buf on rx free list */
		ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
	}
}

/*
 * Reset and initialize the chip
 */
static void
npeinit_locked(void *xsc)
{
	struct npe_softc *sc = xsc;
	struct ifnet *ifp = sc->sc_ifp;

	NPE_ASSERT_LOCKED(sc);
if (ifp->if_drv_flags & IFF_DRV_RUNNING) return;/*XXX*/

	/*
	 * Reset MAC core.
	 */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
	DELAY(NPE_MAC_RESET_DELAY);
	/* configure MAC to generate MDC clock */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);

	/* disable transmitter and receiver in the MAC */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);

	/*
	 * Set the MAC core registers.
	 */
	WR4(sc, NPE_MAC_INT_CLK_THRESH, 0x1);	/* clock ratio: for ixp4xx */
	WR4(sc, NPE_MAC_TX_CNTRL2,	0xf);	/* max retries */
	WR4(sc, NPE_MAC_RANDOM_SEED,	0x8);	/* LFSR back-off seed */
	/* thresholds determined by NPE firmware FS */
	WR4(sc, NPE_MAC_THRESH_P_EMPTY,	0x12);
	WR4(sc, NPE_MAC_THRESH_P_FULL,	0x30);
	WR4(sc, NPE_MAC_BUF_SIZE_TX,	0x8);	/* tx fifo threshold (bytes) */
	WR4(sc, NPE_MAC_TX_DEFER,	0x15);	/* for single deferral */
	WR4(sc, NPE_MAC_RX_DEFER,	0x16);	/* deferral on inter-frame gap */
	WR4(sc, NPE_MAC_TX_TWO_DEFER_1,	0x8);	/* for 2-part deferral */
	WR4(sc, NPE_MAC_TX_TWO_DEFER_2,	0x7);	/* for 2-part deferral */
	WR4(sc, NPE_MAC_SLOT_TIME,	0x80);	/* assumes MII mode */

	WR4(sc, NPE_MAC_TX_CNTRL1,
		  NPE_TX_CNTRL1_RETRY		/* retry failed xmits */
		| NPE_TX_CNTRL1_FCS_EN		/* append FCS */
		| NPE_TX_CNTRL1_2DEFER		/* 2-part deferral */
		| NPE_TX_CNTRL1_PAD_EN);	/* pad runt frames */
	/* XXX pad strip? */
	WR4(sc, NPE_MAC_RX_CNTRL1,
		  NPE_RX_CNTRL1_CRC_EN		/* include CRC/FCS */
		| NPE_RX_CNTRL1_PAUSE_EN);	/* ena pause frame handling */
	WR4(sc, NPE_MAC_RX_CNTRL2, 0);

	npe_setmac(sc, IF_LLADDR(ifp));
	npe_setmcast(sc);

	npe_startxmit(sc);
	npe_startrecv(sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->npe_watchdog_timer = 0;		/* just in case */

	/* enable transmitter and receiver in the MAC */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) | NPE_RX_CNTRL1_RX_EN);
	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) | NPE_TX_CNTRL1_TX_EN);

	callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc);
}

static void
npeinit(void *xsc)
{
	struct npe_softc *sc = xsc;
	NPE_LOCK(sc);
	npeinit_locked(sc);
	NPE_UNLOCK(sc);
}

/*
 * Dequeue packets and place on the h/w transmit queue.
 */
static void
npestart_locked(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;
	struct npebuf *npe;
	struct npehwbuf *hw;
	struct mbuf *m, *n;
	struct npedma *dma = &sc->txdma;
	bus_dma_segment_t segs[NPE_MAXSEG];
	int nseg, len, error, i;
	uint32_t next;

	NPE_ASSERT_LOCKED(sc);
	/* XXX can this happen? */
	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	while (sc->tx_free != NULL) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			/* XXX? */
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
			return;
		}
		npe = sc->tx_free;
		error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map,
		    m, segs, &nseg, 0);
		if (error == EFBIG) {
			n = m_collapse(m, M_DONTWAIT, NPE_MAXSEG);
			if (n == NULL) {
				if_printf(ifp, "%s: too many fragments %u\n",
				    __func__, nseg);
				m_freem(m);
				return;	/* XXX? */
			}
			m = n;
			error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map,
			    m, segs, &nseg, 0);
		}
		if (error != 0 || nseg == 0) {
			if_printf(ifp, "%s: error %u nseg %u\n",
			    __func__, error, nseg);
			m_freem(m);
			return;	/* XXX? */
		}
		sc->tx_free = npe->ix_next;

		bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREWRITE);

		/*
		 * Tap off here if there is a bpf listener.
		 */
		BPF_MTAP(ifp, m);

		npe->ix_m = m;
		hw = npe->ix_hw;
		len = m->m_pkthdr.len;
		next = npe->ix_neaddr + sizeof(hw->ix_ne[0]);
		for (i = 0; i < nseg; i++) {
			hw->ix_ne[i].data = htobe32(segs[i].ds_addr);
			hw->ix_ne[i].len = htobe32((segs[i].ds_len<<16) | len);
			hw->ix_ne[i].next = htobe32(next);

			len = 0;		/* zero for segments > 1 */
			next += sizeof(hw->ix_ne[0]);
		}
		hw->ix_ne[i-1].next = 0;	/* zero last in chain */
		/* XXX flush descriptor instead of using uncached memory */

		DPRINTF(sc, "%s: qwrite(%u, 0x%x) ne_data %x ne_len 0x%x\n",
		    __func__, sc->tx_qid, npe->ix_neaddr,
		    hw->ix_ne[0].data, hw->ix_ne[0].len);
		/* stick it on the tx q */
		/* XXX add vlan priority */
		ixpqmgr_qwrite(sc->tx_qid, npe->ix_neaddr);

		sc->npe_watchdog_timer = 5;
	}
	if (sc->tx_free == NULL)
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
}

static void
npestart(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;
	NPE_LOCK(sc);
	npestart_locked(ifp);
	NPE_UNLOCK(sc);
}

static void
npe_stopxmit(struct npe_softc *sc)
{
	struct npedma *dma = &sc->txdma;
	int i;

	NPE_ASSERT_LOCKED(sc);

	/* XXX qmgr */
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];

		if (npe->ix_m != NULL) {
			bus_dmamap_unload(dma->mtag, npe->ix_map);
			m_freem(npe->ix_m);
			npe->ix_m = NULL;
		}
	}
}

static void
npe_stoprecv(struct npe_softc *sc)
{
	struct npedma *dma = &sc->rxdma;
	int i;

	NPE_ASSERT_LOCKED(sc);

	/* XXX qmgr */
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];

		if (npe->ix_m != NULL) {
			bus_dmamap_unload(dma->mtag, npe->ix_map);
			m_freem(npe->ix_m);
			npe->ix_m = NULL;
		}
	}
}

/*
 * Turn off interrupts, and stop the nic.
 */
static void
npestop(struct npe_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* disable transmitter and receiver in the MAC */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);

	sc->npe_watchdog_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	callout_stop(&sc->tick_ch);

	npe_stopxmit(sc);
	npe_stoprecv(sc);
	/* XXX go into loopback & drain q's? */
	/* XXX but beware of disabling tx above */

	/*
	 * The MAC core rx/tx disable may leave the MAC hardware in an
	 * unpredictable state. A hw reset is executed before resetting
	 * all the MAC parameters to a known value.
	 */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
	DELAY(NPE_MAC_RESET_DELAY);
	WR4(sc, NPE_MAC_INT_CLK_THRESH, NPE_MAC_INT_CLK_THRESH_DEFAULT);
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);
}

static void
npewatchdog(struct npe_softc *sc)
{
	NPE_ASSERT_LOCKED(sc);

	if (sc->npe_watchdog_timer == 0 || --sc->npe_watchdog_timer != 0)
		return;

	device_printf(sc->sc_dev, "watchdog timeout\n");
	sc->sc_ifp->if_oerrors++;

	npeinit_locked(sc);
}

static int
npeioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct npe_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0;
#ifdef DEVICE_POLLING
	int mask;
#endif

	switch (cmd) {
	case SIOCSIFFLAGS:
		NPE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			npestop(sc);
		} else {
			/* reinitialize card on any parameter change */
			npeinit_locked(sc);
		}
		NPE_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* update multicast filter list. */
		NPE_LOCK(sc);
		npe_setmcast(sc);
		NPE_UNLOCK(sc);
		error = 0;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_mii);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

#ifdef DEVICE_POLLING
	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(npe_poll, ifp);
				if (error)
					return error;
				NPE_LOCK(sc);
				/* disable callbacks XXX txdone is shared */
				ixpqmgr_notify_disable(sc->rx_qid);
				ixpqmgr_notify_disable(sc->tx_doneqid);
				ifp->if_capenable |= IFCAP_POLLING;
				NPE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* NB: always enable qmgr callbacks */
				NPE_LOCK(sc);
				/* enable qmgr callbacks */
				ixpqmgr_notify_enable(sc->rx_qid,
				    IX_QMGR_Q_SOURCE_ID_NOT_E);
				ixpqmgr_notify_enable(sc->tx_doneqid,
				    IX_QMGR_Q_SOURCE_ID_NOT_E);
				ifp->if_capenable &= ~IFCAP_POLLING;
				NPE_UNLOCK(sc);
			}
		}
		break;
#endif
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}

/*
 * Setup a traffic class -> rx queue mapping.
 */
static int
npe_setrxqosentry(struct npe_softc *sc, int classix, int trafclass, int qid)
{
	int npeid = npeconfig[device_get_unit(sc->sc_dev)].npeid;
	uint32_t msg[2];

	msg[0] = (NPE_SETRXQOSENTRY << 24) | (npeid << 20) | classix;
	msg[1] = (trafclass << 24) | (1 << 23) | (qid << 16) | (qid << 4);
	return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
}

/*
 * Update and reset the statistics in the NPE.
 */
static int
npe_updatestats(struct npe_softc *sc)
{
	uint32_t msg[2];

	msg[0] = NPE_RESETSTATS << NPE_MAC_MSGID_SHL;
	msg[1] = sc->sc_stats_phys;	/* physical address of stat block */
	return ixpnpe_sendmsg(sc->sc_npe, msg);		/* NB: no recv */
}

#if 0
/*
 * Get the current statistics block.
 */
static int
npe_getstats(struct npe_softc *sc)
{
	uint32_t msg[2];

	msg[0] = NPE_GETSTATS << NPE_MAC_MSGID_SHL;
	msg[1] = sc->sc_stats_phys;	/* physical address of stat block */
	return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
}

/*
 * Query the image id of the loaded firmware.
 */
static uint32_t
npe_getimageid(struct npe_softc *sc)
{
	uint32_t msg[2];

	msg[0] = NPE_GETSTATUS << NPE_MAC_MSGID_SHL;
	msg[1] = 0;
	return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg) == 0 ? msg[1] : 0;
}

/*
 * Enable/disable loopback.
 */
static int
npe_setloopback(struct npe_softc *sc, int ena)
{
	uint32_t msg[2];

	msg[0] = (NPE_SETLOOPBACK << NPE_MAC_MSGID_SHL) | (ena != 0);
	msg[1] = 0;
	return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
}
#endif

static void
npe_child_detached(device_t dev, device_t child)
{
	struct npe_softc *sc;

	sc = device_get_softc(dev);
	if (child == sc->sc_mii)
		sc->sc_mii = NULL;
}

/*
 * MII bus support routines.
 */
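/*
 * NB: each 32-bit MDIO command/status value is spread across four
 * byte-wide registers at 4-byte offsets, least significant byte
 * first; the helpers below assemble and split those values.
 */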
static uint32_t
npe_mii_mdio_read(struct npe_softc *sc, int reg)
{
#define	MII_RD4(sc, reg)	bus_space_read_4(sc->sc_iot, sc->sc_miih, reg)
	uint32_t v;

	/* NB: registers are known to be sequential */
	v =  (MII_RD4(sc, reg+0) & 0xff) << 0;
	v |= (MII_RD4(sc, reg+4) & 0xff) << 8;
	v |= (MII_RD4(sc, reg+8) & 0xff) << 16;
	v |= (MII_RD4(sc, reg+12) & 0xff) << 24;
	return v;
#undef MII_RD4
}

static void
npe_mii_mdio_write(struct npe_softc *sc, int reg, uint32_t cmd)
{
#define	MII_WR4(sc, reg, v) \
	bus_space_write_4(sc->sc_iot, sc->sc_miih, reg, v)

	/* NB: registers are known to be sequential */
	MII_WR4(sc, reg+0, cmd & 0xff);
	MII_WR4(sc, reg+4, (cmd >> 8) & 0xff);
	MII_WR4(sc, reg+8, (cmd >> 16) & 0xff);
	MII_WR4(sc, reg+12, (cmd >> 24) & 0xff);
#undef MII_WR4
}

static int
npe_mii_mdio_wait(struct npe_softc *sc)
{
#define	MAXTRIES	100	/* XXX */
	uint32_t v;
	int i;

	for (i = 0; i < MAXTRIES; i++) {
		v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_CMD);
		if ((v & NPE_MII_GO) == 0)
			return 1;
	}
	return 0;		/* NB: timeout */
#undef MAXTRIES
}

static int
npe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct npe_softc *sc = device_get_softc(dev);
	uint32_t v;

	if (phy != sc->sc_phy)		/* XXX no auto-detect */
		return 0xffff;
	v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL)
	  | NPE_MII_GO;
	npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
	if (npe_mii_mdio_wait(sc))
		v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_STS);
	else
		v = 0xffff | NPE_MII_READ_FAIL;
	return (v & NPE_MII_READ_FAIL) ? 0xffff : (v & 0xffff);
}

static void
npe_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct npe_softc *sc = device_get_softc(dev);
	uint32_t v;

	if (phy != sc->sc_phy)		/* XXX */
		return;
	v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL)
	  | data | NPE_MII_WRITE
	  | NPE_MII_GO;
	npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
	/* XXX complain about timeout */
	(void) npe_mii_mdio_wait(sc);
}

static void
npe_miibus_statchg(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->sc_mii);
	uint32_t tx1, rx1;

	/* sync MAC duplex state */
	tx1 = RD4(sc, NPE_MAC_TX_CNTRL1);
	rx1 = RD4(sc, NPE_MAC_RX_CNTRL1);
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		tx1 &= ~NPE_TX_CNTRL1_DUPLEX;
		rx1 |= NPE_RX_CNTRL1_PAUSE_EN;
	} else {
		tx1 |= NPE_TX_CNTRL1_DUPLEX;
		rx1 &= ~NPE_RX_CNTRL1_PAUSE_EN;
	}
	WR4(sc, NPE_MAC_RX_CNTRL1, rx1);
	WR4(sc, NPE_MAC_TX_CNTRL1, tx1);
}

static device_method_t npe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		npe_probe),
	DEVMETHOD(device_attach,	npe_attach),
	DEVMETHOD(device_detach,	npe_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_detached,	npe_child_detached),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	npe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	npe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	npe_miibus_statchg),

	{ 0, 0 }
};

static driver_t npe_driver = {
	"npe",
	npe_methods,
	sizeof(struct npe_softc),
};

DRIVER_MODULE(npe, ixp, npe_driver, npe_devclass, 0, 0);
DRIVER_MODULE(miibus, npe, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(npe, ixpqmgr, 1, 1, 1);
MODULE_DEPEND(npe, miibus, 1, 1, 1);
MODULE_DEPEND(npe, ether, 1, 1, 1);