/*-
 * Copyright (c) 2006-2008 Sam Leffler.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/xscale/ixp425/if_npe.c 186352 2008-12-20 03:26:09Z sam $");

/*
 * Intel XScale NPE Ethernet driver.
 *
 * This driver handles the two ports present on the IXP425.
 * Packet processing is done by the Network Processing Engines
 * (NPE's) that work together with a MAC and PHY. The MAC
 * is also mapped to the XScale cpu; the PHY is accessed via
 * the MAC. NPE-XScale communication happens through h/w
 * queues managed by the Q Manager block.
 *
 * The code here replaces the ethAcc, ethMii, and ethDB classes
 * in the Intel Access Library (IAL) and the OS-specific driver.
 *
 * XXX add vlan support
 */
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <machine/bus.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_mib.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <arm/xscale/ixp425/ixp425reg.h>
#include <arm/xscale/ixp425/ixp425var.h>
#include <arm/xscale/ixp425/ixp425_qmgr.h>
#include <arm/xscale/ixp425/ixp425_npevar.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <arm/xscale/ixp425/if_npereg.h>

#include <machine/armreg.h>

#include "miibus_if.h"

/*
 * XXX: For the main bus dma tag. Can go away if the new method to get the
 * dma tag from the parent got MFC'd into RELENG_6.
 */
extern struct ixp425_softc *ixp425_softc;

struct npebuf {
	struct npebuf	*ix_next;	/* chain to next buffer */
	void		*ix_m;		/* backpointer to mbuf */
	bus_dmamap_t	ix_map;		/* bus dma map for associated data */
	struct npehwbuf	*ix_hw;		/* associated h/w block */
	uint32_t	ix_neaddr;	/* phys address of ix_hw */
};

struct npedma {
	const char*	name;
	int		nbuf;		/* # npebuf's allocated */
	bus_dma_tag_t	mtag;		/* bus dma tag for mbuf data */
	struct npehwbuf	*hwbuf;		/* NPE h/w buffers */
	bus_dma_tag_t	buf_tag;	/* tag+map for NPE buffers */
	bus_dmamap_t	buf_map;
	bus_addr_t	buf_phys;	/* phys addr of buffers */
	struct npebuf	*buf;		/* s/w buffers (1-1 w/ h/w) */
};

struct npe_softc {
	/* XXX mii requires this be first; do not move! */
	struct ifnet	*sc_ifp;	/* ifnet pointer */
	struct mtx	sc_mtx;		/* basically a perimeter lock */
	device_t	sc_dev;
	bus_space_tag_t	sc_iot;
	bus_space_handle_t sc_ioh;	/* MAC register window */
	device_t	sc_mii;		/* child miibus */
	bus_space_handle_t sc_miih;	/* MII register window */
	int		sc_npeid;
	struct ixpnpe_softc *sc_npe;	/* NPE support */
	int		sc_debug;	/* DPRINTF* control */
	int		sc_tickinterval;
	struct callout	tick_ch;	/* Tick callout */
	int		npe_watchdog_timer;
	struct npedma	txdma;
	struct npebuf	*tx_free;	/* list of free tx buffers */
	struct npedma	rxdma;
	bus_addr_t	buf_phys;	/* XXX for returning a value */
	int		rx_qid;		/* rx qid */
	int		rx_freeqid;	/* rx free buffers qid */
	int		tx_qid;		/* tx qid */
	int		tx_doneqid;	/* tx completed qid */
	int		sc_phy;		/* PHY id */
	struct ifmib_iso_8802_3 mibdata;
	bus_dma_tag_t	sc_stats_tag;	/* bus dma tag for stats block */
	struct npestats	*sc_stats;
	bus_dmamap_t	sc_stats_map;
	bus_addr_t	sc_stats_phys;	/* phys addr of sc_stats */
};

/*
 * Static configuration for IXP425.  The tx and
 * rx free Q id's are fixed by the NPE microcode.  The
 * rx Q id's are programmed to be separate to simplify
 * multi-port processing.  It may be better to handle
 * all traffic through one Q (as done by the Intel drivers).
 *
 * Note that the PHY's are accessible only through MAC C's
 * register window on the IXP425 (see the miibase entries below).
 * This and other platform-specific assumptions probably need to
 * be handled through hints.
 */
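
/*
 * Example hint overrides (illustrative values only; the hint names
 * follow the standard hint.npe.<unit>.<resname> convention and are
 * parsed by the override_* helpers below):
 *
 *	hint.npe.0.npeid="A"	# bind unit 0 to NPE-A
 *	hint.npe.0.mac="A"	# MAC register window (A/B/C)
 *	hint.npe.0.mii="C"	# MII register window (A/B/C)
 *	hint.npe.0.phy=2	# PHY address on the MII bus
 */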
static const struct {
	uint32_t	imageid;	/* default fw image */
	uint32_t	macbase;
	uint32_t	miibase;
	int		phy;		/* phy id */
	uint8_t		rx_qid;
	uint8_t		rx_freeqid;
	uint8_t		tx_qid;
	uint8_t		tx_doneqid;
} npeconfig[NPE_MAX] = {
	[NPE_A] = {
	  .imageid	= IXP425_NPE_A_IMAGEID,
	  .macbase	= IXP435_MAC_A_HWBASE,
	  .miibase	= IXP425_MAC_C_HWBASE,
	  .phy		= 2,
	  .rx_qid	= 4,
	  .rx_freeqid	= 26,
	  .tx_qid	= 23,
	  .tx_doneqid	= 31
	},
	[NPE_B] = {
	  .imageid	= IXP425_NPE_B_IMAGEID,
	  .macbase	= IXP425_MAC_B_HWBASE,
	  .miibase	= IXP425_MAC_C_HWBASE,
	  .phy		= 0,
	  .rx_qid	= 4,
	  .rx_freeqid	= 27,
	  .tx_qid	= 24,
	  .tx_doneqid	= 31
	},
	[NPE_C] = {
	  .imageid	= IXP425_NPE_C_IMAGEID,
	  .macbase	= IXP425_MAC_C_HWBASE,
	  .miibase	= IXP425_MAC_C_HWBASE,
	  .phy		= 1,
	  .rx_qid	= 12,
	  .rx_freeqid	= 28,
	  .tx_qid	= 25,
	  .tx_doneqid	= 31
	},
};
static struct npe_softc *npes[NPE_MAX];	/* NB: indexed by npeid */

static __inline uint32_t
RD4(struct npe_softc *sc, bus_size_t off)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
}

static __inline void
WR4(struct npe_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
}

#define NPE_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define	NPE_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define NPE_LOCK_INIT(_sc) \
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->sc_dev), \
	    MTX_NETWORK_LOCK, MTX_DEF)
#define NPE_LOCK_DESTROY(_sc)	mtx_destroy(&_sc->sc_mtx);
#define NPE_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define NPE_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);

static devclass_t npe_devclass;

static int	override_npeid(device_t, const char *resname, int *val);
static int	npe_activate(device_t dev);
static void	npe_deactivate(device_t dev);
static int	npe_ifmedia_update(struct ifnet *ifp);
static void	npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static void	npe_setmac(struct npe_softc *sc, u_char *eaddr);
static void	npe_getmac(struct npe_softc *sc, u_char *eaddr);
static void	npe_txdone(int qid, void *arg);
static int	npe_rxbuf_init(struct npe_softc *, struct npebuf *,
			struct mbuf *);
static void	npe_rxdone(int qid, void *arg);
static void	npeinit(void *);
static void	npestart_locked(struct ifnet *);
static void	npestart(struct ifnet *);
static void	npestop(struct npe_softc *);
static void	npewatchdog(struct npe_softc *);
static int	npeioctl(struct ifnet *ifp, u_long, caddr_t);

static int	npe_setrxqosentry(struct npe_softc *, int classix,
			int trafclass, int qid);
static int	npe_setfirewallmode(struct npe_softc *, int onoff);
static int	npe_updatestats(struct npe_softc *);
#if 0
static int	npe_getstats(struct npe_softc *);
static uint32_t	npe_getimageid(struct npe_softc *);
static int	npe_setloopback(struct npe_softc *, int ena);
#endif

/* NB: all tx done processing goes through one queue */
static int tx_doneqid = -1;

SYSCTL_NODE(_hw, OID_AUTO, npe, CTLFLAG_RD, 0, "IXP4XX NPE driver parameters");

static int npe_debug = 0;
SYSCTL_INT(_hw_npe, OID_AUTO, debug, CTLFLAG_RW, &npe_debug,
	   0, "IXP4XX NPE network interface debug msgs");
TUNABLE_INT("hw.npe.debug", &npe_debug);
#define	DPRINTF(sc, fmt, ...) do {					\
	if (sc->sc_debug) device_printf(sc->sc_dev, fmt, __VA_ARGS__);	\
} while (0)
#define	DPRINTFn(n, sc, fmt, ...) do {					\
	if (sc->sc_debug >= n) device_printf(sc->sc_dev, fmt, __VA_ARGS__);\
} while (0)
static int npe_tickinterval = 3;		/* npe_tick frequency (secs) */
SYSCTL_INT(_hw_npe, OID_AUTO, tickinterval, CTLFLAG_RD, &npe_tickinterval,
	    0, "periodic work interval (secs)");
TUNABLE_INT("hw.npe.tickinterval", &npe_tickinterval);

static	int npe_rxbuf = 64;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_npe, OID_AUTO, rxbuf, CTLFLAG_RD, &npe_rxbuf,
	    0, "rx buffers allocated");
TUNABLE_INT("hw.npe.rxbuf", &npe_rxbuf);
static	int npe_txbuf = 128;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_npe, OID_AUTO, txbuf, CTLFLAG_RD, &npe_txbuf,
	    0, "tx buffers allocated");
TUNABLE_INT("hw.npe.txbuf", &npe_txbuf);

static int
unit2npeid(int unit)
{
	static const int npeidmap[2][3] = {
		/* on 425 A is for HSS, B & C are for Ethernet */
		{ NPE_B, NPE_C, -1 },	/* IXP425 */
		/* 435 only has A & C, order C then A */
		{ NPE_C, NPE_A, -1 },	/* IXP435 */
	};
	/* XXX check feature register instead */
	return (unit < 3 ? npeidmap[
	    (cpu_id() & CPU_ID_CPU_MASK) == CPU_ID_IXP435][unit] : -1);
}

static int
npe_probe(device_t dev)
{
	static const char *desc[NPE_MAX] = {
		[NPE_A] = "IXP NPE-A",
		[NPE_B] = "IXP NPE-B",
		[NPE_C] = "IXP NPE-C"
	};
	int npeid;

	npeid = -1;
	if (!override_npeid(dev, "npeid", &npeid))
		npeid = unit2npeid(device_get_unit(dev));
	if (npeid == -1) {
		device_printf(dev, "unit not supported\n");
		return EINVAL;
	}
	/* XXX check feature register to see if enabled */
	device_set_desc(dev, desc[npeid]);
	return 0;
}

static int
npe_attach(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	struct ixp425_softc *sa = device_get_softc(device_get_parent(dev));
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct ifnet *ifp;
	int error;
	u_char eaddr[6];

	sc->sc_dev = dev;
	sc->sc_iot = sa->sc_iot;
	NPE_LOCK_INIT(sc);
	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
	sc->sc_debug = npe_debug;
	sc->sc_tickinterval = npe_tickinterval;

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet\n");
		error = EIO;		/* XXX */
		goto out;
	}
	/* NB: must be setup prior to invoking mii code */
	sc->sc_ifp = ifp;

	error = npe_activate(dev);
	if (error) {
		device_printf(dev, "cannot activate npe\n");
		goto out;
	}

	npe_getmac(sc, eaddr);

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = npestart;
	ifp->if_ioctl = npeioctl;
	ifp->if_init = npeinit;
	IFQ_SET_MAXLEN(&ifp->if_snd, sc->txdma.nbuf - 1);
	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_linkmib = &sc->mibdata;
	ifp->if_linkmiblen = sizeof(sc->mibdata);
	sc->mibdata.dot3Compliance = DOT3COMPLIANCE_STATS;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tickinterval",
	    CTLFLAG_RW, &sc->sc_tickinterval, 0, "periodic work frequency");

	ether_ifattach(ifp, eaddr);
	return 0;
out:
	if (ifp != NULL)
		if_free(ifp);
	NPE_LOCK_DESTROY(sc);
	npe_deactivate(dev);
	return error;
}

static int
npe_detach(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = sc->sc_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	npestop(sc);
	if (ifp != NULL) {
		ether_ifdetach(ifp);
		if_free(ifp);
	}
	NPE_LOCK_DESTROY(sc);
	npe_deactivate(dev);
	return 0;
}

/*
 * Compute and install the multicast filter.
 */
static void
npe_setmcast(struct npe_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	uint8_t mask[ETHER_ADDR_LEN], addr[ETHER_ADDR_LEN];
	int i;

	if (ifp->if_flags & IFF_PROMISC) {
		memset(mask, 0, ETHER_ADDR_LEN);
		memset(addr, 0, ETHER_ADDR_LEN);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		static const uint8_t allmulti[ETHER_ADDR_LEN] =
		    { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
		memcpy(mask, allmulti, ETHER_ADDR_LEN);
		memcpy(addr, allmulti, ETHER_ADDR_LEN);
	} else {
		uint8_t clr[ETHER_ADDR_LEN], set[ETHER_ADDR_LEN];
		struct ifmultiaddr *ifma;
		const uint8_t *mac;

		memset(clr, 0, ETHER_ADDR_LEN);
		memset(set, 0xff, ETHER_ADDR_LEN);

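		/*
		 * Fold the multicast addresses together: "set" keeps the
		 * bits common to every address while "clr" accumulates
		 * the bits present in any.  Below, bit positions where
		 * all addresses agree become the mask and the common
		 * value becomes the compare address.
		 */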
		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			mac = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				clr[i] |= mac[i];
				set[i] &= mac[i];
			}
		}
		IF_ADDR_UNLOCK(ifp);

		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			mask[i] = set[i] | ~clr[i];
			addr[i] = set[i];
		}
	}

	/*
	 * Write the mask and address registers.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		WR4(sc, NPE_MAC_ADDR_MASK(i), mask[i]);
		WR4(sc, NPE_MAC_ADDR(i), addr[i]);
	}
}

static void
npe_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct npe_softc *sc;

	if (error != 0)
		return;
	sc = (struct npe_softc *)arg;
	sc->buf_phys = segs[0].ds_addr;
}

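/*
 * Allocate and map the DMA state for one direction (tx or rx): a tag
 * for mapping mbuf payloads (up to maxseg segments each), a tag+map
 * for the block of nbuf NPE h/w descriptors (coherent memory for now),
 * and a parallel array of npebuf's tracking the h/w blocks 1-1.
 */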
static int
npe_dma_setup(struct npe_softc *sc, struct npedma *dma,
	const char *name, int nbuf, int maxseg)
{
	int error, i;

	memset(dma, 0, sizeof(*dma));

	dma->name = name;
	dma->nbuf = nbuf;

	/* DMA tag for mapped mbufs */
	error = bus_dma_tag_create(ixp425_softc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES, maxseg, MCLBYTES, 0,
	    busdma_lock_mutex, &sc->sc_mtx, &dma->mtag);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to create %s mbuf dma tag, "
		     "error %u\n", dma->name, error);
		return error;
	}

	/* DMA tag and map for the NPE buffers */
	error = bus_dma_tag_create(ixp425_softc->sc_dmat, sizeof(uint32_t), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    nbuf * sizeof(struct npehwbuf), 1,
	    nbuf * sizeof(struct npehwbuf), 0,
	    busdma_lock_mutex, &sc->sc_mtx, &dma->buf_tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to create %s npebuf dma tag, error %u\n",
		    dma->name, error);
		return error;
	}
	/* XXX COHERENT for now */
	error = bus_dmamem_alloc(dma->buf_tag, (void **)&dma->hwbuf,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &dma->buf_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		     "unable to allocate memory for %s h/w buffers, error %u\n",
		     dma->name, error);
		return error;
	}
	/* XXX M_TEMP */
	dma->buf = malloc(nbuf * sizeof(struct npebuf), M_TEMP,
	    M_NOWAIT | M_ZERO);
	if (dma->buf == NULL) {
		device_printf(sc->sc_dev,
		     "unable to allocate memory for %s s/w buffers\n",
		     dma->name);
		return ENOMEM;
	}
	error = bus_dmamap_load(dma->buf_tag, dma->buf_map,
	    dma->hwbuf, nbuf * sizeof(struct npehwbuf), npe_getaddr, sc, 0);
	if (error != 0) {
		device_printf(sc->sc_dev,
		     "unable to map memory for %s h/w buffers, error %u\n",
		     dma->name, error);
		return error;
	}
	dma->buf_phys = sc->buf_phys;
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];
		struct npehwbuf *hw = &dma->hwbuf[i];

		/* calculate offset to shared area */
		npe->ix_neaddr = dma->buf_phys +
			((uintptr_t)hw - (uintptr_t)dma->hwbuf);
		KASSERT((npe->ix_neaddr & 0x1f) == 0,
		    ("ixpbuf misaligned, PA 0x%x", npe->ix_neaddr));
		error = bus_dmamap_create(dma->mtag, BUS_DMA_NOWAIT,
				&npe->ix_map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			     "unable to create dmamap for %s buffer %u, "
			     "error %u\n", dma->name, i, error);
			return error;
		}
		npe->ix_hw = hw;
	}
	bus_dmamap_sync(dma->buf_tag, dma->buf_map, BUS_DMASYNC_PREWRITE);
	return 0;
}

static void
npe_dma_destroy(struct npe_softc *sc, struct npedma *dma)
{
	int i;

	if (dma->hwbuf != NULL) {
		for (i = 0; i < dma->nbuf; i++) {
			struct npebuf *npe = &dma->buf[i];
			bus_dmamap_destroy(dma->mtag, npe->ix_map);
		}
		bus_dmamap_unload(dma->buf_tag, dma->buf_map);
		bus_dmamem_free(dma->buf_tag, dma->hwbuf, dma->buf_map);
	}
	if (dma->buf != NULL)
		free(dma->buf, M_TEMP);
	if (dma->buf_tag)
		bus_dma_tag_destroy(dma->buf_tag);
	if (dma->mtag)
		bus_dma_tag_destroy(dma->mtag);
	memset(dma, 0, sizeof(*dma));
}

static int
override_addr(device_t dev, const char *resname, int *base)
{
	int unit = device_get_unit(dev);
	const char *resval;

	/* XXX warn for wrong hint type */
	if (resource_string_value("npe", unit, resname, &resval) != 0)
		return 0;
	switch (resval[0]) {
	case 'A':
		*base = IXP435_MAC_A_HWBASE;
		break;
	case 'B':
		*base = IXP425_MAC_B_HWBASE;
		break;
	case 'C':
		*base = IXP425_MAC_C_HWBASE;
		break;
	default:
		device_printf(dev, "Warning, bad value %s for "
		    "npe.%d.%s ignored\n", resval, unit, resname);
		return 0;
	}
	if (bootverbose)
		device_printf(dev, "using npe.%d.%s=%s override\n",
		    unit, resname, resval);
	return 1;
}

static int
override_npeid(device_t dev, const char *resname, int *npeid)
{
	int unit = device_get_unit(dev);
	const char *resval;

	/* XXX warn for wrong hint type */
	if (resource_string_value("npe", unit, resname, &resval) != 0)
		return 0;
	switch (resval[0]) {
	case 'A': *npeid = NPE_A; break;
	case 'B': *npeid = NPE_B; break;
	case 'C': *npeid = NPE_C; break;
	default:
		device_printf(dev, "Warning, bad value %s for "
		    "npe.%d.%s ignored\n", resval, unit, resname);
		return 0;
	}
	if (bootverbose)
		device_printf(dev, "using npe.%d.%s=%s override\n",
		    unit, resname, resval);
	return 1;
}

static int
override_unit(device_t dev, const char *resname, int *val, int min, int max)
{
	int unit = device_get_unit(dev);
	int resval;

	if (resource_int_value("npe", unit, resname, &resval) != 0)
		return 0;
	if (!(min <= resval && resval <= max)) {
		device_printf(dev, "Warning, bad value %d for npe.%d.%s "
		    "ignored (value must be [%d-%d])\n", resval, unit,
		    resname, min, max);
		return 0;
	}
	if (bootverbose)
		device_printf(dev, "using npe.%d.%s=%d override\n",
		    unit, resname, resval);
	*val = resval;
	return 1;
}

static int
override_imageid(device_t dev, const char *resname, uint32_t *val)
{
	int unit = device_get_unit(dev);
	int resval;

	if (resource_int_value("npe", unit, resname, &resval) != 0)
		return 0;
	/* XXX validate */
	if (bootverbose)
		device_printf(dev, "using npe.%d.%s=0x%x override\n",
		    unit, resname, resval);
	*val = resval;
	return 1;
}

static void
npe_mac_reset(struct npe_softc *sc)
{
	/*
	 * Reset MAC core.
	 */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
	DELAY(NPE_MAC_RESET_DELAY);
	/* configure MAC to generate MDC clock */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);
}

static int
npe_activate(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	int error, i, macbase, miibase;
	uint32_t imageid, msg[2];

	/*
	 * Setup NPE ID, MAC, and MII bindings.  We allow override
	 * via hints to handle unexpected board configs.
	 */
	if (!override_npeid(dev, "npeid", &sc->sc_npeid))
		sc->sc_npeid = unit2npeid(device_get_unit(dev));
	sc->sc_npe = ixpnpe_attach(dev, sc->sc_npeid);
	if (sc->sc_npe == NULL) {
		device_printf(dev, "cannot attach ixpnpe\n");
		return EIO;		/* XXX */
	}

	/* MAC */
	if (!override_addr(dev, "mac", &macbase))
		macbase = npeconfig[sc->sc_npeid].macbase;
	device_printf(sc->sc_dev, "MAC at 0x%x\n", macbase);
	if (bus_space_map(sc->sc_iot, macbase, IXP425_REG_SIZE, 0,
	    &sc->sc_ioh)) {
		device_printf(dev, "cannot map mac registers 0x%x:0x%x\n",
		    macbase, IXP425_REG_SIZE);
		return ENOMEM;
	}

	/* PHY */
	if (!override_unit(dev, "phy", &sc->sc_phy, 0, MII_NPHY-1))
		sc->sc_phy = npeconfig[sc->sc_npeid].phy;
	if (!override_addr(dev, "mii", &miibase))
		miibase = npeconfig[sc->sc_npeid].miibase;
	device_printf(sc->sc_dev, "MII at 0x%x\n", miibase);
	if (miibase != macbase) {
		/*
		 * PHY is mapped through a different MAC, setup an
		 * additional mapping for frobbing the PHY registers.
		 */
		if (bus_space_map(sc->sc_iot, miibase, IXP425_REG_SIZE, 0,
		    &sc->sc_miih)) {
			device_printf(dev,
			    "cannot map MII registers 0x%x:0x%x\n",
			    miibase, IXP425_REG_SIZE);
			return ENOMEM;
		}
	} else
		sc->sc_miih = sc->sc_ioh;

	/*
	 * Load NPE firmware and start it running.  We assume
	 * that minor version bumps remain compatible so probe
	 * the firmware image starting with the expected version
	 * and then bump the minor version up to the max.
	 */
	if (!override_imageid(dev, "imageid", &imageid))
		imageid = npeconfig[sc->sc_npeid].imageid;
	for (;;) {
		error = ixpnpe_init(sc->sc_npe, "npe_fw", imageid);
		if (error == 0)
			break;
		/* ESRCH is returned when the requested image is not present */
		if (error != ESRCH) {
			device_printf(dev, "cannot init NPE (error %d)\n",
			    error);
			return error;
		}
		/* bump the minor version up to the max possible */
		if (NPEIMAGE_MINOR(imageid) == 0xff) {
			device_printf(dev, "cannot locate firmware "
			    "(imageid 0x%08x)\n", imageid);
			return error;
		}
		imageid++;
	}
	/* NB: firmware should respond with a status msg */
	if (ixpnpe_recvmsg_sync(sc->sc_npe, msg) != 0) {
		device_printf(dev, "firmware did not respond as expected\n");
		return EIO;
	}

	/* probe for PHY */
	if (mii_phy_probe(dev, &sc->sc_mii, npe_ifmedia_update,
	    npe_ifmedia_status)) {
		device_printf(dev, "cannot find PHY %d.\n", sc->sc_phy);
		return ENXIO;
	}

	error = npe_dma_setup(sc, &sc->txdma, "tx", npe_txbuf, NPE_MAXSEG);
	if (error != 0)
		return error;
	error = npe_dma_setup(sc, &sc->rxdma, "rx", npe_rxbuf, 1);
	if (error != 0)
		return error;

	/* setup statistics block */
	error = bus_dma_tag_create(ixp425_softc->sc_dmat, sizeof(uint32_t), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct npestats), 1, sizeof(struct npestats), 0,
	    busdma_lock_mutex, &sc->sc_mtx, &sc->sc_stats_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to create stats tag, "
		     "error %u\n", error);
		return error;
	}
	error = bus_dmamem_alloc(sc->sc_stats_tag, (void **)&sc->sc_stats,
	    BUS_DMA_NOWAIT, &sc->sc_stats_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		     "unable to allocate memory for stats block, error %u\n",
		     error);
		return error;
	}
	error = bus_dmamap_load(sc->sc_stats_tag, sc->sc_stats_map,
	    sc->sc_stats, sizeof(struct npestats), npe_getaddr, sc, 0);
	if (error != 0) {
		device_printf(sc->sc_dev,
		     "unable to load memory for stats block, error %u\n",
		     error);
		return error;
	}
	sc->sc_stats_phys = sc->buf_phys;

	/*
	 * Setup h/w rx/tx queues.  There are four q's:
	 *   rx		inbound q of rx'd frames
	 *   rx_free	pool of ixpbuf's for receiving frames
	 *   tx		outbound q of frames to send
	 *   tx_done	q of tx frames that have been processed
	 *
	 * The NPE handles the actual tx/rx process and the q manager
	 * handles the queues.  The driver just writes entries to the
	 * q manager mailbox's and gets callbacks when there are rx'd
	 * frames to process or tx'd frames to reap.  These callbacks
	 * are controlled by the q configurations; e.g. we get a
	 * callback when tx_done has 2 or more frames to process and
	 * when the rx q has at least one frame.  These settings can
	 * be changed at the time the q is configured.
	 */
	sc->rx_qid = npeconfig[sc->sc_npeid].rx_qid;
	ixpqmgr_qconfig(sc->rx_qid, npe_rxbuf, 0, 1,
		IX_QMGR_Q_SOURCE_ID_NOT_E, npe_rxdone, sc);
	sc->rx_freeqid = npeconfig[sc->sc_npeid].rx_freeqid;
	ixpqmgr_qconfig(sc->rx_freeqid, npe_rxbuf, 0, npe_rxbuf/2, 0, NULL, sc);
	/*
	 * Setup the NPE to direct all traffic to rx_qid.
	 * When QoS is enabled in the firmware there are
	 * 8 traffic classes; otherwise just 4.
	 */
	for (i = 0; i < 8; i++)
		npe_setrxqosentry(sc, i, 0, sc->rx_qid);

	/* disable firewall mode just in case (should be off) */
	npe_setfirewallmode(sc, 0);

	sc->tx_qid = npeconfig[sc->sc_npeid].tx_qid;
	sc->tx_doneqid = npeconfig[sc->sc_npeid].tx_doneqid;
	ixpqmgr_qconfig(sc->tx_qid, npe_txbuf, 0, npe_txbuf, 0, NULL, sc);
	if (tx_doneqid == -1) {
		ixpqmgr_qconfig(sc->tx_doneqid, npe_txbuf, 0, 2,
			IX_QMGR_Q_SOURCE_ID_NOT_E, npe_txdone, sc);
		tx_doneqid = sc->tx_doneqid;
	}

	KASSERT(npes[sc->sc_npeid] == NULL,
	    ("npe %u already setup", sc->sc_npeid));
	npes[sc->sc_npeid] = sc;

	return 0;
}

static void
npe_deactivate(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);

	npes[sc->sc_npeid] = NULL;

	/* XXX disable q's */
	if (sc->sc_npe != NULL) {
		ixpnpe_stop(sc->sc_npe);
		ixpnpe_detach(sc->sc_npe);
	}
	if (sc->sc_stats != NULL) {
		bus_dmamap_unload(sc->sc_stats_tag, sc->sc_stats_map);
		bus_dmamem_free(sc->sc_stats_tag, sc->sc_stats,
			sc->sc_stats_map);
	}
	if (sc->sc_stats_tag != NULL)
		bus_dma_tag_destroy(sc->sc_stats_tag);
	npe_dma_destroy(sc, &sc->txdma);
	npe_dma_destroy(sc, &sc->rxdma);
	bus_generic_detach(sc->sc_dev);
	if (sc->sc_mii != NULL)
		device_delete_child(sc->sc_dev, sc->sc_mii);
}

/*
 * Change media according to request.
 */
static int
npe_ifmedia_update(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->sc_mii);
	NPE_LOCK(sc);
	mii_mediachg(mii);
	/* XXX push state ourself? */
	NPE_UNLOCK(sc);
	return (0);
}

/*
 * Notify the world which media we're using.
 */
static void
npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct npe_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->sc_mii);
	NPE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	NPE_UNLOCK(sc);
}

static void
npe_addstats(struct npe_softc *sc)
{
#define	MIBADD(x)	sc->mibdata.x += be32toh(ns->x)
	struct ifnet *ifp = sc->sc_ifp;
	struct npestats *ns = sc->sc_stats;

	MIBADD(dot3StatsAlignmentErrors);
	MIBADD(dot3StatsFCSErrors);
	MIBADD(dot3StatsSingleCollisionFrames);
	MIBADD(dot3StatsMultipleCollisionFrames);
	MIBADD(dot3StatsDeferredTransmissions);
	MIBADD(dot3StatsLateCollisions);
	MIBADD(dot3StatsExcessiveCollisions);
	MIBADD(dot3StatsInternalMacTransmitErrors);
	MIBADD(dot3StatsCarrierSenseErrors);
	sc->mibdata.dot3StatsFrameTooLongs +=
	      be32toh(ns->RxLargeFramesDiscards)
	    + be32toh(ns->TxLargeFrameDiscards);
	MIBADD(dot3StatsInternalMacReceiveErrors);
	sc->mibdata.dot3StatsMissedFrames +=
	      be32toh(ns->RxOverrunDiscards)
	    + be32toh(ns->RxUnderflowEntryDiscards);

	ifp->if_oerrors +=
		  be32toh(ns->dot3StatsInternalMacTransmitErrors)
		+ be32toh(ns->dot3StatsCarrierSenseErrors)
		+ be32toh(ns->TxVLANIdFilterDiscards)
		;
	ifp->if_ierrors += be32toh(ns->dot3StatsFCSErrors)
		+ be32toh(ns->dot3StatsInternalMacReceiveErrors)
		+ be32toh(ns->RxOverrunDiscards)
		+ be32toh(ns->RxUnderflowEntryDiscards)
		;
	ifp->if_collisions +=
		  be32toh(ns->dot3StatsSingleCollisionFrames)
		+ be32toh(ns->dot3StatsMultipleCollisionFrames)
		;
#undef MIBADD
}

static void
npe_tick(void *xsc)
{
#define	ACK	(NPE_RESETSTATS << NPE_MAC_MSGID_SHL)
	struct npe_softc *sc = xsc;
	struct mii_data *mii = device_get_softc(sc->sc_mii);
	uint32_t msg[2];

	NPE_ASSERT_LOCKED(sc);

	/*
	 * NB: to avoid sleeping with the softc lock held we
	 * split the NPE msg processing into two parts.  The
	 * request for statistics is sent w/o waiting for a
	 * reply and then on the next tick we retrieve the
	 * results.  This works because npe_tick is the only
	 * code that talks via the mailbox's (except at setup).
	 * This likely can be handled better.
	 */
	if (ixpnpe_recvmsg_async(sc->sc_npe, msg) == 0 && msg[0] == ACK) {
		bus_dmamap_sync(sc->sc_stats_tag, sc->sc_stats_map,
		    BUS_DMASYNC_POSTREAD);
		npe_addstats(sc);
	}
	npe_updatestats(sc);
	mii_tick(mii);

	npewatchdog(sc);

	/* schedule next poll */
	callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc);
#undef ACK
}

static void
npe_setmac(struct npe_softc *sc, u_char *eaddr)
{
	WR4(sc, NPE_MAC_UNI_ADDR_1, eaddr[0]);
	WR4(sc, NPE_MAC_UNI_ADDR_2, eaddr[1]);
	WR4(sc, NPE_MAC_UNI_ADDR_3, eaddr[2]);
	WR4(sc, NPE_MAC_UNI_ADDR_4, eaddr[3]);
	WR4(sc, NPE_MAC_UNI_ADDR_5, eaddr[4]);
	WR4(sc, NPE_MAC_UNI_ADDR_6, eaddr[5]);
}

static void
npe_getmac(struct npe_softc *sc, u_char *eaddr)
{
	/* NB: the unicast address appears to be loaded from EEPROM on reset */
	eaddr[0] = RD4(sc, NPE_MAC_UNI_ADDR_1) & 0xff;
	eaddr[1] = RD4(sc, NPE_MAC_UNI_ADDR_2) & 0xff;
	eaddr[2] = RD4(sc, NPE_MAC_UNI_ADDR_3) & 0xff;
	eaddr[3] = RD4(sc, NPE_MAC_UNI_ADDR_4) & 0xff;
	eaddr[4] = RD4(sc, NPE_MAC_UNI_ADDR_5) & 0xff;
	eaddr[5] = RD4(sc, NPE_MAC_UNI_ADDR_6) & 0xff;
}

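/*
 * Scratch state used by npe_txdone to collect completed tx buffers
 * per NPE so that each interface's lock is taken only once per batch
 * (see npe_txdone_finish).
 */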
struct txdone {
	struct npebuf *head;
	struct npebuf **tail;
	int count;
};

static __inline void
npe_txdone_finish(struct npe_softc *sc, const struct txdone *td)
{
	struct ifnet *ifp = sc->sc_ifp;

	NPE_LOCK(sc);
	*td->tail = sc->tx_free;
	sc->tx_free = td->head;
	/*
	 * We're no longer busy, so clear the busy flag and call the
	 * start routine to xmit more packets.
	 */
	ifp->if_opackets += td->count;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->npe_watchdog_timer = 0;
	npestart_locked(ifp);
	NPE_UNLOCK(sc);
}

/*
 * Q manager callback on tx done queue.  Reap mbufs
 * and return tx buffers to the free list.  Finally
 * restart output.  Note the microcode has only one
 * txdone q wired into it so we must use the NPE ID
 * returned with each npehwbuf to decide where to
 * send buffers.
 */
static void
npe_txdone(int qid, void *arg)
{
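/* Map a q entry's physical buffer address back to its s/w npebuf. */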
#define	P2V(a, dma) \
	&(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
	struct npe_softc *sc0 = arg;
	struct npe_softc *sc;
	struct npebuf *npe;
	struct txdone *td, q[NPE_MAX];
	uint32_t entry;

	/* XXX no NPE-A support */
	q[NPE_B].tail = &q[NPE_B].head; q[NPE_B].count = 0;
	q[NPE_C].tail = &q[NPE_C].head; q[NPE_C].count = 0;
	/* XXX max # at a time? */
	while (ixpqmgr_qread(qid, &entry) == 0) {
		DPRINTF(sc0, "%s: entry 0x%x NPE %u port %u\n",
		    __func__, entry, NPE_QM_Q_NPE(entry), NPE_QM_Q_PORT(entry));

		sc = npes[NPE_QM_Q_NPE(entry)];
		npe = P2V(NPE_QM_Q_ADDR(entry), &sc->txdma);
		m_freem(npe->ix_m);
		npe->ix_m = NULL;

		td = &q[NPE_QM_Q_NPE(entry)];
		*td->tail = npe;
		td->tail = &npe->ix_next;
		td->count++;
	}

	if (q[NPE_B].count)
		npe_txdone_finish(npes[NPE_B], &q[NPE_B]);
	if (q[NPE_C].count)
		npe_txdone_finish(npes[NPE_C], &q[NPE_C]);
#undef P2V
}

static int
npe_rxbuf_init(struct npe_softc *sc, struct npebuf *npe, struct mbuf *m)
{
	bus_dma_segment_t segs[1];
	struct npedma *dma = &sc->rxdma;
	struct npehwbuf *hw;
	int error, nseg;

	if (m == NULL) {
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			return ENOBUFS;
	}
	KASSERT(m->m_ext.ext_size >= 1536 + ETHER_ALIGN,
		("ext_size %d", m->m_ext.ext_size));
	m->m_pkthdr.len = m->m_len = 1536;
	/* backload payload and align ip hdr */
	m->m_data = m->m_ext.ext_buf + (m->m_ext.ext_size - (1536+ETHER_ALIGN));
	error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map, m,
			segs, &nseg, 0);
	if (error != 0) {
		m_freem(m);
		return error;
	}
	hw = npe->ix_hw;
	hw->ix_ne[0].data = htobe32(segs[0].ds_addr);
	/* NB: NPE requires length be a multiple of 64 */
	/*
	 * NB: the buffer length goes in the upper 16 bits of the word;
	 * on completion the NPE writes the received frame length into
	 * the lower 16 bits (see npe_rxdone).
	 */
	hw->ix_ne[0].len = htobe32(segs[0].ds_len << 16);
	hw->ix_ne[0].next = 0;
	npe->ix_m = m;
	/* Flush the memory in the mbuf */
	bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREREAD);
	return 0;
}

/*
 * RX q processing for a specific NPE.  Claim entries
 * from the hardware queue and pass the frames up the
 * stack. Pass the rx buffers to the free list.
 */
static void
npe_rxdone(int qid, void *arg)
{
#define	P2V(a, dma) \
	&(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
	struct npe_softc *sc = arg;
	struct npedma *dma = &sc->rxdma;
	uint32_t entry;

	while (ixpqmgr_qread(qid, &entry) == 0) {
		struct npebuf *npe = P2V(NPE_QM_Q_ADDR(entry), dma);
		struct mbuf *m;

		DPRINTF(sc, "%s: entry 0x%x neaddr 0x%x ne_len 0x%x\n",
		    __func__, entry, npe->ix_neaddr, npe->ix_hw->ix_ne[0].len);
		/*
		 * Allocate a new mbuf to replenish the rx buffer.
		 * If doing so fails we drop the rx'd frame so we
		 * can reuse the previous mbuf.  When we're able to
		 * allocate a new mbuf dispatch the mbuf w/ rx'd
		 * data up the stack and replace it with the newly
		 * allocated one.
		 */
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m != NULL) {
			struct mbuf *mrx = npe->ix_m;
			struct npehwbuf *hw = npe->ix_hw;
			struct ifnet *ifp = sc->sc_ifp;

			/* Flush mbuf memory for rx'd data */
			bus_dmamap_sync(dma->mtag, npe->ix_map,
			    BUS_DMASYNC_POSTREAD);

			/* XXX flush hw buffer; works now 'cuz coherent */
			/* set m_len etc. per rx frame size */
			mrx->m_len = be32toh(hw->ix_ne[0].len) & 0xffff;
			mrx->m_pkthdr.len = mrx->m_len;
			mrx->m_pkthdr.rcvif = ifp;
			mrx->m_flags |= M_HASFCS;

			ifp->if_ipackets++;
			ifp->if_input(ifp, mrx);
		} else {
			/* discard frame and re-use mbuf */
			m = npe->ix_m;
		}
		if (npe_rxbuf_init(sc, npe, m) == 0) {
			/* return npe buf to rx free list */
			ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
		} else {
			/* XXX should not happen */
		}
	}
#undef P2V
}

#ifdef DEVICE_POLLING
static void
npe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct npe_softc *sc = ifp->if_softc;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		npe_rxdone(sc->rx_qid, sc);
		npe_txdone(sc->tx_doneqid, sc);	/* XXX polls both NPE's */
	}
}
#endif /* DEVICE_POLLING */

static void
npe_startxmit(struct npe_softc *sc)
{
	struct npedma *dma = &sc->txdma;
	int i;

	NPE_ASSERT_LOCKED(sc);
	sc->tx_free = NULL;
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];
		if (npe->ix_m != NULL) {
			/* NB: should not happen */
			device_printf(sc->sc_dev,
			    "%s: free mbuf at entry %u\n", __func__, i);
			m_freem(npe->ix_m);
		}
		npe->ix_m = NULL;
		npe->ix_next = sc->tx_free;
		sc->tx_free = npe;
	}
}

static void
npe_startrecv(struct npe_softc *sc)
{
	struct npedma *dma = &sc->rxdma;
	struct npebuf *npe;
	int i;

	NPE_ASSERT_LOCKED(sc);
	for (i = 0; i < dma->nbuf; i++) {
		npe = &dma->buf[i];
		npe_rxbuf_init(sc, npe, npe->ix_m);
		/* set npe buf on rx free list */
		ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
	}
}

/*
 * Reset and initialize the chip
 */
static void
npeinit_locked(void *xsc)
{
	struct npe_softc *sc = xsc;
	struct ifnet *ifp = sc->sc_ifp;

	NPE_ASSERT_LOCKED(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;			/* XXX */

	/*
	 * Reset MAC core.
	 */
	npe_mac_reset(sc);

	/* disable transmitter and receiver in the MAC */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);

	/*
	 * Set the MAC core registers.
	 */
	WR4(sc, NPE_MAC_INT_CLK_THRESH, 0x1);	/* clock ratio: for ixp4xx */
	WR4(sc, NPE_MAC_TX_CNTRL2,	0xf);	/* max retries */
	WR4(sc, NPE_MAC_RANDOM_SEED,	0x8);	/* LFSR back-off seed */
	/* thresholds determined by NPE firmware FS */
	WR4(sc, NPE_MAC_THRESH_P_EMPTY,	0x12);
	WR4(sc, NPE_MAC_THRESH_P_FULL,	0x30);
	WR4(sc, NPE_MAC_BUF_SIZE_TX,	0x8);	/* tx fifo threshold (bytes) */
	WR4(sc, NPE_MAC_TX_DEFER,	0x15);	/* for single deferral */
	WR4(sc, NPE_MAC_RX_DEFER,	0x16);	/* deferral on inter-frame gap */
	WR4(sc, NPE_MAC_TX_TWO_DEFER_1,	0x8);	/* for 2-part deferral */
	WR4(sc, NPE_MAC_TX_TWO_DEFER_2,	0x7);	/* for 2-part deferral */
	WR4(sc, NPE_MAC_SLOT_TIME,	0x80);	/* assumes MII mode */

	WR4(sc, NPE_MAC_TX_CNTRL1,
		  NPE_TX_CNTRL1_RETRY		/* retry failed xmits */
		| NPE_TX_CNTRL1_FCS_EN		/* append FCS */
		| NPE_TX_CNTRL1_2DEFER		/* 2-part deferral */
		| NPE_TX_CNTRL1_PAD_EN);	/* pad runt frames */
	/* XXX pad strip? */
	WR4(sc, NPE_MAC_RX_CNTRL1,
		  NPE_RX_CNTRL1_CRC_EN		/* include CRC/FCS */
		| NPE_RX_CNTRL1_PAUSE_EN);	/* ena pause frame handling */
	WR4(sc, NPE_MAC_RX_CNTRL2, 0);

	npe_setmac(sc, IF_LLADDR(ifp));
	npe_setmcast(sc);

	npe_startxmit(sc);
	npe_startrecv(sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->npe_watchdog_timer = 0;		/* just in case */

	/* enable transmitter and receiver in the MAC */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) | NPE_RX_CNTRL1_RX_EN);
	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) | NPE_TX_CNTRL1_TX_EN);

	callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc);
}

static void
npeinit(void *xsc)
{
	struct npe_softc *sc = xsc;
	NPE_LOCK(sc);
	npeinit_locked(sc);
	NPE_UNLOCK(sc);
}

/*
 * Dequeue packets and place on the h/w transmit queue.
 */
static void
npestart_locked(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;
	struct npebuf *npe;
	struct npehwbuf *hw;
	struct mbuf *m, *n;
	struct npedma *dma = &sc->txdma;
	bus_dma_segment_t segs[NPE_MAXSEG];
	int nseg, len, error, i;
	uint32_t next;

	NPE_ASSERT_LOCKED(sc);
	/* XXX can this happen? */
	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	while (sc->tx_free != NULL) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			/* XXX? */
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
			return;
		}
		npe = sc->tx_free;
		error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map,
		    m, segs, &nseg, 0);
		if (error == EFBIG) {
			n = m_collapse(m, M_DONTWAIT, NPE_MAXSEG);
			if (n == NULL) {
				if_printf(ifp, "%s: too many fragments %u\n",
				    __func__, nseg);
				m_freem(m);
				return;	/* XXX? */
			}
			m = n;
			error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map,
			    m, segs, &nseg, 0);
		}
		if (error != 0 || nseg == 0) {
			if_printf(ifp, "%s: error %u nseg %u\n",
			    __func__, error, nseg);
			m_freem(m);
			return;	/* XXX? */
		}
		sc->tx_free = npe->ix_next;

		bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREWRITE);

		/*
		 * Tap off here if there is a bpf listener.
		 */
		BPF_MTAP(ifp, m);

		npe->ix_m = m;
		hw = npe->ix_hw;
		len = m->m_pkthdr.len;
		next = npe->ix_neaddr + sizeof(hw->ix_ne[0]);
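		/*
		 * Build the chain of NE descriptors: each entry carries
		 * its segment length in the upper 16 bits; the total
		 * frame length rides in the lower 16 bits of the first
		 * entry only.
		 */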
		for (i = 0; i < nseg; i++) {
			hw->ix_ne[i].data = htobe32(segs[i].ds_addr);
			hw->ix_ne[i].len = htobe32((segs[i].ds_len<<16) | len);
			hw->ix_ne[i].next = htobe32(next);

			len = 0;		/* zero for segments > 1 */
			next += sizeof(hw->ix_ne[0]);
		}
		hw->ix_ne[i-1].next = 0;	/* zero last in chain */
		/* XXX flush descriptor instead of using uncached memory */

		DPRINTF(sc, "%s: qwrite(%u, 0x%x) ne_data %x ne_len 0x%x\n",
		    __func__, sc->tx_qid, npe->ix_neaddr,
		    hw->ix_ne[0].data, hw->ix_ne[0].len);
		/* stick it on the tx q */
		/* XXX add vlan priority */
		ixpqmgr_qwrite(sc->tx_qid, npe->ix_neaddr);

		sc->npe_watchdog_timer = 5;
	}
	if (sc->tx_free == NULL)
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
}

static void
npestart(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;
	NPE_LOCK(sc);
	npestart_locked(ifp);
	NPE_UNLOCK(sc);
}

static void
npe_stopxmit(struct npe_softc *sc)
{
	struct npedma *dma = &sc->txdma;
	int i;

	NPE_ASSERT_LOCKED(sc);

	/* XXX qmgr */
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];

		if (npe->ix_m != NULL) {
			bus_dmamap_unload(dma->mtag, npe->ix_map);
			m_freem(npe->ix_m);
			npe->ix_m = NULL;
		}
	}
}

static void
npe_stoprecv(struct npe_softc *sc)
{
	struct npedma *dma = &sc->rxdma;
	int i;

	NPE_ASSERT_LOCKED(sc);

	/* XXX qmgr */
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];

		if (npe->ix_m != NULL) {
			bus_dmamap_unload(dma->mtag, npe->ix_map);
			m_freem(npe->ix_m);
			npe->ix_m = NULL;
		}
	}
}

/*
 * Turn off interrupts, and stop the nic.
 */
static void
npestop(struct npe_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* disable transmitter and receiver in the MAC */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);

	sc->npe_watchdog_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	callout_stop(&sc->tick_ch);

	npe_stopxmit(sc);
	npe_stoprecv(sc);
	/* XXX go into loopback & drain q's? */
	/* XXX but beware of disabling tx above */

	/*
	 * The MAC core rx/tx disable may leave the MAC hardware in an
	 * unpredictable state. A hw reset is executed before resetting
	 * all the MAC parameters to a known value.
	 */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
	DELAY(NPE_MAC_RESET_DELAY);
	WR4(sc, NPE_MAC_INT_CLK_THRESH, NPE_MAC_INT_CLK_THRESH_DEFAULT);
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);
}

static void
npewatchdog(struct npe_softc *sc)
{
	NPE_ASSERT_LOCKED(sc);

	if (sc->npe_watchdog_timer == 0 || --sc->npe_watchdog_timer != 0)
		return;

	device_printf(sc->sc_dev, "watchdog timeout\n");
	sc->sc_ifp->if_oerrors++;

	npeinit_locked(sc);
}

static int
npeioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct npe_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0;
#ifdef DEVICE_POLLING
	int mask;
#endif

	switch (cmd) {
	case SIOCSIFFLAGS:
		NPE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			npestop(sc);
		} else {
			/* reinitialize card on any parameter change */
			npeinit_locked(sc);
		}
		NPE_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* update multicast filter list. */
		NPE_LOCK(sc);
		npe_setmcast(sc);
		NPE_UNLOCK(sc);
		error = 0;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_mii);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

#ifdef DEVICE_POLLING
	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(npe_poll, ifp);
				if (error)
					return error;
				NPE_LOCK(sc);
				/* disable callbacks XXX txdone is shared */
				ixpqmgr_notify_disable(sc->rx_qid);
				ixpqmgr_notify_disable(sc->tx_doneqid);
				ifp->if_capenable |= IFCAP_POLLING;
				NPE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* NB: always enable qmgr callbacks */
				NPE_LOCK(sc);
				ixpqmgr_notify_enable(sc->rx_qid,
				    IX_QMGR_Q_SOURCE_ID_NOT_E);
				ixpqmgr_notify_enable(sc->tx_doneqid,
				    IX_QMGR_Q_SOURCE_ID_NOT_E);
				ifp->if_capenable &= ~IFCAP_POLLING;
				NPE_UNLOCK(sc);
			}
		}
		break;
#endif
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}

/*
 * Setup a traffic class -> rx queue mapping.
 */
static int
npe_setrxqosentry(struct npe_softc *sc, int classix, int trafclass, int qid)
{
	uint32_t msg[2];

	msg[0] = (NPE_SETRXQOSENTRY << 24) | (sc->sc_npeid << 20) | classix;
	msg[1] = (trafclass << 24) | (1 << 23) | (qid << 16) | (qid << 4);
	return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
}

static int
npe_setfirewallmode(struct npe_softc *sc, int onoff)
{
	uint32_t msg[2];

	/* XXX honor onoff */
	msg[0] = (NPE_SETFIREWALLMODE << 24) | (sc->sc_npeid << 20);
	msg[1] = 0;
	return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
}

/*
 * Update and reset the statistics in the NPE.
 */
static int
npe_updatestats(struct npe_softc *sc)
{
	uint32_t msg[2];

	msg[0] = NPE_RESETSTATS << NPE_MAC_MSGID_SHL;
	msg[1] = sc->sc_stats_phys;	/* physical address of stat block */
	return ixpnpe_sendmsg_async(sc->sc_npe, msg);
}

#if 0
/*
 * Get the current statistics block.
 */
static int
npe_getstats(struct npe_softc *sc)
{
	uint32_t msg[2];

	msg[0] = NPE_GETSTATS << NPE_MAC_MSGID_SHL;
	msg[1] = sc->sc_stats_phys;	/* physical address of stat block */
	return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
}

/*
 * Query the image id of the loaded firmware.
 */
static uint32_t
npe_getimageid(struct npe_softc *sc)
{
	uint32_t msg[2];

	msg[0] = NPE_GETSTATUS << NPE_MAC_MSGID_SHL;
	msg[1] = 0;
	return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg) == 0 ?
	    msg[1] : 0;
}

/*
 * Enable/disable loopback.
 */
static int
npe_setloopback(struct npe_softc *sc, int ena)
{
	uint32_t msg[2];

	msg[0] = (NPE_SETLOOPBACK << NPE_MAC_MSGID_SHL) | (ena != 0);
	msg[1] = 0;
	return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
}
#endif

static void
npe_child_detached(device_t dev, device_t child)
{
	struct npe_softc *sc;

	sc = device_get_softc(dev);
	if (child == sc->sc_mii)
		sc->sc_mii = NULL;
}

/*
 * MII bus support routines.
 */
#define	MII_RD4(sc, reg)	bus_space_read_4(sc->sc_iot, sc->sc_miih, reg)
#define	MII_WR4(sc, reg, v) \
	bus_space_write_4(sc->sc_iot, sc->sc_miih, reg, v)

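/*
 * The 32-bit MDIO command/status values are spread across four
 * consecutive 32-bit registers, each holding one byte of the value;
 * the helpers below gather and scatter the bytes accordingly.
 */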
static uint32_t
npe_mii_mdio_read(struct npe_softc *sc, int reg)
{
	uint32_t v;

	/* NB: registers are known to be sequential */
	v =  (MII_RD4(sc, reg+0) & 0xff) << 0;
	v |= (MII_RD4(sc, reg+4) & 0xff) << 8;
	v |= (MII_RD4(sc, reg+8) & 0xff) << 16;
	v |= (MII_RD4(sc, reg+12) & 0xff) << 24;
	return v;
}

static void
npe_mii_mdio_write(struct npe_softc *sc, int reg, uint32_t cmd)
{
	/* NB: registers are known to be sequential */
	MII_WR4(sc, reg+0, cmd & 0xff);
	MII_WR4(sc, reg+4, (cmd >> 8) & 0xff);
	MII_WR4(sc, reg+8, (cmd >> 16) & 0xff);
	MII_WR4(sc, reg+12, (cmd >> 24) & 0xff);
}

static int
npe_mii_mdio_wait(struct npe_softc *sc)
{
	uint32_t v;
	int i;

	/* NB: typically this takes 25-30 trips */
	for (i = 0; i < 1000; i++) {
		v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_CMD);
		if ((v & NPE_MII_GO) == 0)
			return 1;
		DELAY(1);
	}
	device_printf(sc->sc_dev, "%s: timeout after ~1ms, cmd 0x%x\n",
	    __func__, v);
	return 0;		/* NB: timeout */
}

static int
npe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct npe_softc *sc = device_get_softc(dev);
	uint32_t v;

	if (phy != sc->sc_phy)		/* XXX no auto-detect */
		return 0xffff;
	v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL) | NPE_MII_GO;
	npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
	if (npe_mii_mdio_wait(sc))
		v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_STS);
	else
		v = 0xffff | NPE_MII_READ_FAIL;
	return (v & NPE_MII_READ_FAIL) ? 0xffff : (v & 0xffff);
}

static void
npe_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct npe_softc *sc = device_get_softc(dev);
	uint32_t v;

	if (phy != sc->sc_phy)		/* XXX */
		return;
	v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL)
	  | data | NPE_MII_WRITE
	  | NPE_MII_GO;
	npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
	/* XXX complain about timeout */
	(void) npe_mii_mdio_wait(sc);
}

static void
npe_miibus_statchg(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->sc_mii);
	uint32_t tx1, rx1;

	/* sync MAC duplex state */
	tx1 = RD4(sc, NPE_MAC_TX_CNTRL1);
	rx1 = RD4(sc, NPE_MAC_RX_CNTRL1);
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		tx1 &= ~NPE_TX_CNTRL1_DUPLEX;
		rx1 |= NPE_RX_CNTRL1_PAUSE_EN;
	} else {
		tx1 |= NPE_TX_CNTRL1_DUPLEX;
		rx1 &= ~NPE_RX_CNTRL1_PAUSE_EN;
	}
	WR4(sc, NPE_MAC_RX_CNTRL1, rx1);
	WR4(sc, NPE_MAC_TX_CNTRL1, tx1);
}

static device_method_t npe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		npe_probe),
	DEVMETHOD(device_attach,	npe_attach),
	DEVMETHOD(device_detach,	npe_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_detached,	npe_child_detached),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	npe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	npe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	npe_miibus_statchg),

	{ 0, 0 }
};

static driver_t npe_driver = {
	"npe",
	npe_methods,
	sizeof(struct npe_softc),
};

DRIVER_MODULE(npe, ixp, npe_driver, npe_devclass, 0, 0);
DRIVER_MODULE(miibus, npe, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(npe, ixpqmgr, 1, 1, 1);
MODULE_DEPEND(npe, miibus, 1, 1, 1);
MODULE_DEPEND(npe, ether, 1, 1, 1);