/*-
 * Copyright (c) 2006-2008 Sam Leffler.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/xscale/ixp425/if_npe.c 192660 2009-05-23 19:14:20Z sam $");

/*
 * Intel XScale NPE Ethernet driver.
 *
 * This driver handles the two ports present on the IXP425.
 * Packet processing is done by the Network Processing Engines
 * (NPEs) that work together with a MAC and PHY. The MAC
 * is also mapped to the XScale cpu; the PHY is accessed via
 * the MAC. NPE-XScale communication happens through h/w
 * queues managed by the Q Manager block.
 *
 * The code here replaces the ethAcc, ethMii, and ethDB classes
 * in the Intel Access Library (IAL) and the OS-specific driver.
 *
 * XXX add vlan support
 */
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <machine/bus.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_mib.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <arm/xscale/ixp425/ixp425reg.h>
#include <arm/xscale/ixp425/ixp425var.h>
#include <arm/xscale/ixp425/ixp425_qmgr.h>
#include <arm/xscale/ixp425/ixp425_npevar.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <arm/xscale/ixp425/if_npereg.h>

#include <machine/armreg.h>

#include "miibus_if.h"

/*
 * XXX: For the main bus dma tag. Can go away if the new method to get the
 * dma tag from the parent got MFC'd into RELENG_6.
 */
extern struct ixp425_softc *ixp425_softc;

struct npebuf {
	struct npebuf	*ix_next;	/* chain to next buffer */
	void		*ix_m;		/* backpointer to mbuf */
	bus_dmamap_t	ix_map;		/* bus dma map for associated data */
	struct npehwbuf	*ix_hw;		/* associated h/w block */
	uint32_t	ix_neaddr;	/* phys address of ix_hw */
};

struct npedma {
	const char*	name;
	int		nbuf;		/* # npebuf's allocated */
	bus_dma_tag_t	mtag;		/* bus dma tag for mbuf data */
	struct npehwbuf	*hwbuf;		/* NPE h/w buffers */
	bus_dma_tag_t	buf_tag;	/* tag+map for NPE buffers */
	bus_dmamap_t	buf_map;
	bus_addr_t	buf_phys;	/* phys addr of buffers */
	struct npebuf	*buf;		/* s/w buffers (1-1 w/ h/w) */
};

struct npe_softc {
	/* XXX mii requires this be first; do not move! */
	struct ifnet	*sc_ifp;	/* ifnet pointer */
	struct mtx	sc_mtx;		/* basically a perimeter lock */
	device_t	sc_dev;
	bus_space_tag_t	sc_iot;
	bus_space_handle_t sc_ioh;	/* MAC register window */
	device_t	sc_mii;		/* child miibus */
	bus_space_handle_t sc_miih;	/* MII register window */
	int		sc_npeid;
	struct ixpnpe_softc *sc_npe;	/* NPE support */
	int		sc_debug;	/* DPRINTF* control */
	int		sc_tickinterval;
	struct callout	tick_ch;	/* Tick callout */
	int		npe_watchdog_timer;
	struct npedma	txdma;
	struct npebuf	*tx_free;	/* list of free tx buffers */
	struct npedma	rxdma;
	bus_addr_t	buf_phys;	/* XXX for returning a value */
	int		rx_qid;		/* rx qid */
	int		rx_freeqid;	/* rx free buffers qid */
	int		tx_qid;		/* tx qid */
	int		tx_doneqid;	/* tx completed qid */
	int		sc_phy;		/* PHY id */
	struct ifmib_iso_8802_3 mibdata;
	bus_dma_tag_t	sc_stats_tag;	/* bus dma tag for stats block */
	struct npestats	*sc_stats;
	bus_dmamap_t	sc_stats_map;
	bus_addr_t	sc_stats_phys;	/* phys addr of sc_stats */
	struct npestats	sc_totals;	/* accumulated sc_stats */
};

/*
 * Static configuration for IXP425.  The tx and
 * rx free Q ids are fixed by the NPE microcode.  The
 * rx Q ids are programmed to be separate to simplify
 * multi-port processing.  It may be better to handle
 * all traffic through one Q (as done by the Intel drivers).
 *
 * Note that the PHYs are accessible only from MAC A
 * on the IXP425.  This and other platform-specific
 * assumptions probably need to be handled through hints.
 */
static const struct {
	uint32_t	macbase;
	uint32_t	miibase;
	int		phy;		/* phy id */
	uint8_t		rx_qid;
	uint8_t		rx_freeqid;
	uint8_t		tx_qid;
	uint8_t		tx_doneqid;
} npeconfig[NPE_MAX] = {
	[NPE_A] = {
	  .macbase	= IXP435_MAC_A_HWBASE,
	  .miibase	= IXP425_MAC_C_HWBASE,
	  .phy		= 2,
	  .rx_qid	= 4,
	  .rx_freeqid	= 26,
	  .tx_qid	= 23,
	  .tx_doneqid	= 31
	},
	[NPE_B] = {
	  .macbase	= IXP425_MAC_B_HWBASE,
	  .miibase	= IXP425_MAC_C_HWBASE,
	  .phy		= 0,
	  .rx_qid	= 4,
	  .rx_freeqid	= 27,
	  .tx_qid	= 24,
	  .tx_doneqid	= 31
	},
	[NPE_C] = {
	  .macbase	= IXP425_MAC_C_HWBASE,
	  .miibase	= IXP425_MAC_C_HWBASE,
	  .phy		= 1,
	  .rx_qid	= 12,
	  .rx_freeqid	= 28,
	  .tx_qid	= 25,
	  .tx_doneqid	= 31
	},
};
static struct npe_softc *npes[NPE_MAX];	/* NB: indexed by npeid */

static __inline uint32_t
RD4(struct npe_softc *sc, bus_size_t off)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
}

static __inline void
WR4(struct npe_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
}

#define NPE_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define	NPE_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define NPE_LOCK_INIT(_sc) \
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->sc_dev), \
	    MTX_NETWORK_LOCK, MTX_DEF)
#define NPE_LOCK_DESTROY(_sc)	mtx_destroy(&_sc->sc_mtx);
#define NPE_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define NPE_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);

static devclass_t npe_devclass;

static int	override_npeid(device_t, const char *resname, int *val);
static int	npe_activate(device_t dev);
static void	npe_deactivate(device_t dev);
static int	npe_ifmedia_update(struct ifnet *ifp);
static void	npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static void	npe_setmac(struct npe_softc *sc, u_char *eaddr);
static void	npe_getmac(struct npe_softc *sc, u_char *eaddr);
static void	npe_txdone(int qid, void *arg);
static int	npe_rxbuf_init(struct npe_softc *, struct npebuf *,
			struct mbuf *);
static void	npe_rxdone(int qid, void *arg);
static void	npeinit(void *);
static void	npestart_locked(struct ifnet *);
static void	npestart(struct ifnet *);
static void	npestop(struct npe_softc *);
static void	npewatchdog(struct npe_softc *);
static int	npeioctl(struct ifnet * ifp, u_long, caddr_t);

static int	npe_setrxqosentry(struct npe_softc *, int classix,
			int trafclass, int qid);
static int	npe_setfirewallmode(struct npe_softc *, int onoff);
static int	npe_updatestats(struct npe_softc *);
#if 0
static int	npe_getstats(struct npe_softc *);
static uint32_t	npe_getimageid(struct npe_softc *);
static int	npe_setloopback(struct npe_softc *, int ena);
#endif

/* NB: all tx done processing goes through one queue */
static int tx_doneqid = -1;

SYSCTL_NODE(_hw, OID_AUTO, npe, CTLFLAG_RD, 0, "IXP4XX NPE driver parameters");

static int npe_debug = 0;
SYSCTL_INT(_hw_npe, OID_AUTO, debug, CTLFLAG_RW, &npe_debug,
	   0, "IXP4XX NPE network interface debug msgs");
TUNABLE_INT("hw.npe.debug", &npe_debug);
#define	DPRINTF(sc, fmt, ...) do {					\
	if (sc->sc_debug) device_printf(sc->sc_dev, fmt, __VA_ARGS__);	\
} while (0)
#define	DPRINTFn(n, sc, fmt, ...) do {					\
	if (sc->sc_debug >= n) device_printf(sc->sc_dev, fmt, __VA_ARGS__);\
} while (0)
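/*
 * Usage example (a sketch): with hw.npe.debug (or the per-device
 * "debug" sysctl added in npe_attach) set nonzero,
 *	DPRINTF(sc, "%s: entry 0x%x\n", __func__, entry);
 * expands to a device_printf gated on sc->sc_debug, as in
 * npe_txdone and npe_rxdone below.
 */
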
static int npe_tickinterval = 3;		/* npe_tick frequency (secs) */
SYSCTL_INT(_hw_npe, OID_AUTO, tickinterval, CTLFLAG_RD, &npe_tickinterval,
	    0, "periodic work interval (secs)");
TUNABLE_INT("hw.npe.tickinterval", &npe_tickinterval);

static	int npe_rxbuf = 64;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_npe, OID_AUTO, rxbuf, CTLFLAG_RD, &npe_rxbuf,
	    0, "rx buffers allocated");
TUNABLE_INT("hw.npe.rxbuf", &npe_rxbuf);
static	int npe_txbuf = 128;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_npe, OID_AUTO, txbuf, CTLFLAG_RD, &npe_txbuf,
	    0, "tx buffers allocated");
TUNABLE_INT("hw.npe.txbuf", &npe_txbuf);

static int
unit2npeid(int unit)
{
	static const int npeidmap[2][3] = {
		/* on 425 A is for HSS, B & C are for Ethernet */
		{ NPE_B, NPE_C, -1 },	/* IXP425 */
		/* 435 only has A & C, order C then A */
		{ NPE_C, NPE_A, -1 },	/* IXP435 */
	};
	/* XXX check feature register instead */
	return (unit < 3 ? npeidmap[
	    (cpu_id() & CPU_ID_CPU_MASK) == CPU_ID_IXP435][unit] : -1);
}

static int
npe_probe(device_t dev)
{
	static const char *desc[NPE_MAX] = {
		[NPE_A] = "IXP NPE-A",
		[NPE_B] = "IXP NPE-B",
		[NPE_C] = "IXP NPE-C"
	};
	int unit = device_get_unit(dev);
	int npeid;

	if (unit > 2 ||
	    (ixp4xx_read_feature_bits() &
	     (unit == 0 ? EXP_FCTRL_ETH0 : EXP_FCTRL_ETH1)) == 0)
		return EINVAL;

	npeid = -1;
	if (!override_npeid(dev, "npeid", &npeid))
		npeid = unit2npeid(unit);
	if (npeid == -1) {
		device_printf(dev, "unit %d not supported\n", unit);
		return EINVAL;
	}
	device_set_desc(dev, desc[npeid]);
	return 0;
}

static int
npe_attach(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	struct ixp425_softc *sa = device_get_softc(device_get_parent(dev));
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct ifnet *ifp;
	int error;
	u_char eaddr[6];

	sc->sc_dev = dev;
	sc->sc_iot = sa->sc_iot;
	NPE_LOCK_INIT(sc);
	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
	sc->sc_debug = npe_debug;
	sc->sc_tickinterval = npe_tickinterval;

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet\n");
		error = EIO;		/* XXX */
		goto out;
	}
	/* NB: must be setup prior to invoking mii code */
	sc->sc_ifp = ifp;

	error = npe_activate(dev);
	if (error) {
		device_printf(dev, "cannot activate npe\n");
		goto out;
	}

	npe_getmac(sc, eaddr);

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = npestart;
	ifp->if_ioctl = npeioctl;
	ifp->if_init = npeinit;
	IFQ_SET_MAXLEN(&ifp->if_snd, sc->txdma.nbuf - 1);
	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_linkmib = &sc->mibdata;
	ifp->if_linkmiblen = sizeof(sc->mibdata);
	sc->mibdata.dot3Compliance = DOT3COMPLIANCE_STATS;
	/* device supports oversized vlan frames */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tickinterval",
	    CTLFLAG_RW, &sc->sc_tickinterval, 0, "periodic work frequency");
	SYSCTL_ADD_STRUCT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "stats",
	    CTLFLAG_RD, &sc->sc_totals, npestats, "onboard stats");

	ether_ifattach(ifp, eaddr);
	return 0;
out:
	if (ifp != NULL)
		if_free(ifp);
	NPE_LOCK_DESTROY(sc);
	npe_deactivate(dev);
	return error;
}

static int
npe_detach(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = sc->sc_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	npestop(sc);
	if (ifp != NULL) {
		ether_ifdetach(ifp);
		if_free(ifp);
	}
	NPE_LOCK_DESTROY(sc);
	npe_deactivate(dev);
	return 0;
}

/*
 * Compute and install the multicast filter.
 */
static void
npe_setmcast(struct npe_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	uint8_t mask[ETHER_ADDR_LEN], addr[ETHER_ADDR_LEN];
	int i;

	if (ifp->if_flags & IFF_PROMISC) {
		memset(mask, 0, ETHER_ADDR_LEN);
		memset(addr, 0, ETHER_ADDR_LEN);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		static const uint8_t allmulti[ETHER_ADDR_LEN] =
		    { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
		memcpy(mask, allmulti, ETHER_ADDR_LEN);
		memcpy(addr, allmulti, ETHER_ADDR_LEN);
	} else {
		uint8_t clr[ETHER_ADDR_LEN], set[ETHER_ADDR_LEN];
		struct ifmultiaddr *ifma;
		const uint8_t *mac;

		memset(clr, 0, ETHER_ADDR_LEN);
		memset(set, 0xff, ETHER_ADDR_LEN);

		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			mac = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				clr[i] |= mac[i];
				set[i] &= mac[i];
			}
		}
		IF_ADDR_UNLOCK(ifp);

		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			mask[i] = set[i] | ~clr[i];
			addr[i] = set[i];
		}
	}
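	/*
	 * Worked example (hypothetical list): for the two multicast
	 * addresses 01:00:5e:00:00:01 and 01:00:5e:00:00:02, clr (the
	 * OR of the addresses) is 01:00:5e:00:00:03 and set (the AND)
	 * is 01:00:5e:00:00:00, so mask = set | ~clr = ff:ff:ff:ff:ff:fc
	 * and addr = 01:00:5e:00:00:00; i.e. the MAC is told to match
	 * exactly the address bits on which the whole list agrees.
	 */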

	/*
	 * Write the mask and address registers.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		WR4(sc, NPE_MAC_ADDR_MASK(i), mask[i]);
		WR4(sc, NPE_MAC_ADDR(i), addr[i]);
	}
}

static void
npe_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct npe_softc *sc;

	if (error != 0)
		return;
	sc = (struct npe_softc *)arg;
	sc->buf_phys = segs[0].ds_addr;
}

static int
npe_dma_setup(struct npe_softc *sc, struct npedma *dma,
	const char *name, int nbuf, int maxseg)
{
	int error, i;

	memset(dma, 0, sizeof(*dma));

	dma->name = name;
	dma->nbuf = nbuf;

	/* DMA tag for mapped mbufs  */
	error = bus_dma_tag_create(ixp425_softc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES, maxseg, MCLBYTES, 0,
	    busdma_lock_mutex, &sc->sc_mtx, &dma->mtag);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to create %s mbuf dma tag, "
		     "error %u\n", dma->name, error);
		return error;
	}

	/* DMA tag and map for the NPE buffers */
	error = bus_dma_tag_create(ixp425_softc->sc_dmat, sizeof(uint32_t), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    nbuf * sizeof(struct npehwbuf), 1,
	    nbuf * sizeof(struct npehwbuf), 0,
	    busdma_lock_mutex, &sc->sc_mtx, &dma->buf_tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to create %s npebuf dma tag, error %u\n",
		    dma->name, error);
		return error;
	}
	/* XXX COHERENT for now */
	error = bus_dmamem_alloc(dma->buf_tag, (void **)&dma->hwbuf,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &dma->buf_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		     "unable to allocate memory for %s h/w buffers, error %u\n",
		     dma->name, error);
		return error;
	}
	/* XXX M_TEMP */
	dma->buf = malloc(nbuf * sizeof(struct npebuf), M_TEMP, M_NOWAIT | M_ZERO);
	if (dma->buf == NULL) {
		device_printf(sc->sc_dev,
		     "unable to allocate memory for %s s/w buffers\n",
		     dma->name);
		return ENOMEM;
	}
	error = bus_dmamap_load(dma->buf_tag, dma->buf_map,
	    dma->hwbuf, nbuf*sizeof(struct npehwbuf), npe_getaddr, sc, 0);
	if (error != 0) {
		device_printf(sc->sc_dev,
		     "unable to map memory for %s h/w buffers, error %u\n",
		     dma->name, error);
		return error;
	}
	dma->buf_phys = sc->buf_phys;
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];
		struct npehwbuf *hw = &dma->hwbuf[i];

		/* calculate offset to shared area */
		npe->ix_neaddr = dma->buf_phys +
			((uintptr_t)hw - (uintptr_t)dma->hwbuf);
		KASSERT((npe->ix_neaddr & 0x1f) == 0,
		    ("ixpbuf misaligned, PA 0x%x", npe->ix_neaddr));
		error = bus_dmamap_create(dma->mtag, BUS_DMA_NOWAIT,
				&npe->ix_map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			     "unable to create dmamap for %s buffer %u, "
			     "error %u\n", dma->name, i, error);
			return error;
		}
		npe->ix_hw = hw;
	}
	bus_dmamap_sync(dma->buf_tag, dma->buf_map, BUS_DMASYNC_PREWRITE);
	return 0;
}

static void
npe_dma_destroy(struct npe_softc *sc, struct npedma *dma)
{
	int i;

	if (dma->hwbuf != NULL) {
		for (i = 0; i < dma->nbuf; i++) {
			struct npebuf *npe = &dma->buf[i];
			bus_dmamap_destroy(dma->mtag, npe->ix_map);
		}
		bus_dmamap_unload(dma->buf_tag, dma->buf_map);
		bus_dmamem_free(dma->buf_tag, dma->hwbuf, dma->buf_map);
	}
	if (dma->buf != NULL)
		free(dma->buf, M_TEMP);
	if (dma->buf_tag)
		bus_dma_tag_destroy(dma->buf_tag);
	if (dma->mtag)
		bus_dma_tag_destroy(dma->mtag);
	memset(dma, 0, sizeof(*dma));
}

static int
override_addr(device_t dev, const char *resname, int *base)
{
	int unit = device_get_unit(dev);
	const char *resval;

	/* XXX warn for wrong hint type */
	if (resource_string_value("npe", unit, resname, &resval) != 0)
		return 0;
	switch (resval[0]) {
	case 'A':
		*base = IXP435_MAC_A_HWBASE;
		break;
	case 'B':
		*base = IXP425_MAC_B_HWBASE;
		break;
	case 'C':
		*base = IXP425_MAC_C_HWBASE;
		break;
	default:
		device_printf(dev, "Warning, bad value %s for "
		    "npe.%d.%s ignored\n", resval, unit, resname);
		return 0;
	}
	if (bootverbose)
		device_printf(dev, "using npe.%d.%s=%s override\n",
		    unit, resname, resval);
	return 1;
}

static int
override_npeid(device_t dev, const char *resname, int *npeid)
{
	int unit = device_get_unit(dev);
	const char *resval;

	/* XXX warn for wrong hint type */
	if (resource_string_value("npe", unit, resname, &resval) != 0)
		return 0;
	switch (resval[0]) {
	case 'A': *npeid = NPE_A; break;
	case 'B': *npeid = NPE_B; break;
	case 'C': *npeid = NPE_C; break;
	default:
		device_printf(dev, "Warning, bad value %s for "
		    "npe.%d.%s ignored\n", resval, unit, resname);
		return 0;
	}
	if (bootverbose)
		device_printf(dev, "using npe.%d.%s=%s override\n",
		    unit, resname, resval);
	return 1;
}

static int
override_unit(device_t dev, const char *resname, int *val, int min, int max)
{
	int unit = device_get_unit(dev);
	int resval;

	if (resource_int_value("npe", unit, resname, &resval) != 0)
		return 0;
	if (!(min <= resval && resval <= max)) {
		device_printf(dev, "Warning, bad value %d for npe.%d.%s "
		    "ignored (value must be [%d-%d])\n", resval, unit,
		    resname, min, max);
		return 0;
	}
	if (bootverbose)
		device_printf(dev, "using npe.%d.%s=%d override\n",
		    unit, resname, resval);
	*val = resval;
	return 1;
}

static void
npe_mac_reset(struct npe_softc *sc)
{
	/*
	 * Reset MAC core.
	 */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
	DELAY(NPE_MAC_RESET_DELAY);
	/* configure MAC to generate MDC clock */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);
}

static int
npe_activate(device_t dev)
{
	struct npe_softc * sc = device_get_softc(dev);
	int error, i, macbase, miibase;

	/*
	 * Setup NPE ID, MAC, and MII bindings.  We allow override
	 * via hints to handle unexpected board configs.
	 */
	if (!override_npeid(dev, "npeid", &sc->sc_npeid))
		sc->sc_npeid = unit2npeid(device_get_unit(dev));
	sc->sc_npe = ixpnpe_attach(dev, sc->sc_npeid);
	if (sc->sc_npe == NULL) {
		device_printf(dev, "cannot attach ixpnpe\n");
		return EIO;		/* XXX */
	}

	/* MAC */
	if (!override_addr(dev, "mac", &macbase))
		macbase = npeconfig[sc->sc_npeid].macbase;
	device_printf(sc->sc_dev, "MAC at 0x%x\n", macbase);
	if (bus_space_map(sc->sc_iot, macbase, IXP425_REG_SIZE, 0, &sc->sc_ioh)) {
		device_printf(dev, "cannot map mac registers 0x%x:0x%x\n",
		    macbase, IXP425_REG_SIZE);
		return ENOMEM;
	}

	/* PHY */
	if (!override_unit(dev, "phy", &sc->sc_phy, 0, MII_NPHY-1))
		sc->sc_phy = npeconfig[sc->sc_npeid].phy;
	if (!override_addr(dev, "mii", &miibase))
		miibase = npeconfig[sc->sc_npeid].miibase;
	device_printf(sc->sc_dev, "MII at 0x%x\n", miibase);
	if (miibase != macbase) {
		/*
		 * PHY is mapped through a different MAC, setup an
		 * additional mapping for frobbing the PHY registers.
		 */
		if (bus_space_map(sc->sc_iot, miibase, IXP425_REG_SIZE, 0, &sc->sc_miih)) {
			device_printf(dev,
			    "cannot map MII registers 0x%x:0x%x\n",
			    miibase, IXP425_REG_SIZE);
			return ENOMEM;
		}
	} else
		sc->sc_miih = sc->sc_ioh;

	/*
	 * Load NPE firmware and start it running.
	 */
	error = ixpnpe_init(sc->sc_npe);
	if (error != 0) {
		device_printf(dev, "cannot init NPE (error %d)\n", error);
		return error;
	}

	/* probe for PHY */
	if (mii_phy_probe(dev, &sc->sc_mii, npe_ifmedia_update, npe_ifmedia_status)) {
		device_printf(dev, "cannot find PHY %d.\n", sc->sc_phy);
		return ENXIO;
	}

	error = npe_dma_setup(sc, &sc->txdma, "tx", npe_txbuf, NPE_MAXSEG);
	if (error != 0)
		return error;
	error = npe_dma_setup(sc, &sc->rxdma, "rx", npe_rxbuf, 1);
	if (error != 0)
		return error;

	/* setup statistics block */
	error = bus_dma_tag_create(ixp425_softc->sc_dmat, sizeof(uint32_t), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct npestats), 1, sizeof(struct npestats), 0,
	    busdma_lock_mutex, &sc->sc_mtx, &sc->sc_stats_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to create stats tag, "
		     "error %u\n", error);
		return error;
	}
	error = bus_dmamem_alloc(sc->sc_stats_tag, (void **)&sc->sc_stats,
	    BUS_DMA_NOWAIT, &sc->sc_stats_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		     "unable to allocate memory for stats block, error %u\n",
		     error);
		return error;
	}
	error = bus_dmamap_load(sc->sc_stats_tag, sc->sc_stats_map,
	    sc->sc_stats, sizeof(struct npestats), npe_getaddr, sc, 0);
	if (error != 0) {
		device_printf(sc->sc_dev,
		     "unable to load memory for stats block, error %u\n",
		     error);
		return error;
	}
	sc->sc_stats_phys = sc->buf_phys;

	/*
	 * Setup h/w rx/tx queues.  There are four q's:
	 *   rx		inbound q of rx'd frames
	 *   rx_free	pool of ixpbuf's for receiving frames
	 *   tx		outbound q of frames to send
	 *   tx_done	q of tx frames that have been processed
	 *
	 * The NPE handles the actual tx/rx process and the q manager
	 * handles the queues.  The driver just writes entries to the
	 * q manager mailboxes and gets callbacks when there are rx'd
	 * frames to process or tx'd frames to reap.  These callbacks
	 * are controlled by the q configurations; e.g. we get a
	 * callback when tx_done has 2 or more frames to process and
	 * when the rx q has at least one frame.  These settings can be
	 * changed at the time the q is configured.
	 */
	sc->rx_qid = npeconfig[sc->sc_npeid].rx_qid;
	ixpqmgr_qconfig(sc->rx_qid, npe_rxbuf, 0,  1,
		IX_QMGR_Q_SOURCE_ID_NOT_E, npe_rxdone, sc);
	sc->rx_freeqid = npeconfig[sc->sc_npeid].rx_freeqid;
	ixpqmgr_qconfig(sc->rx_freeqid,	npe_rxbuf, 0, npe_rxbuf/2, 0, NULL, sc);
	/*
	 * Setup the NPE to direct all traffic to rx_qid.
	 * When QoS is enabled in the firmware there are
	 * 8 traffic classes; otherwise just 4.
	 */
	for (i = 0; i < 8; i++)
		npe_setrxqosentry(sc, i, 0, sc->rx_qid);

	/* disable firewall mode just in case (should be off) */
	npe_setfirewallmode(sc, 0);

	sc->tx_qid = npeconfig[sc->sc_npeid].tx_qid;
	sc->tx_doneqid = npeconfig[sc->sc_npeid].tx_doneqid;
	ixpqmgr_qconfig(sc->tx_qid, npe_txbuf, 0, npe_txbuf, 0, NULL, sc);
	if (tx_doneqid == -1) {
		ixpqmgr_qconfig(sc->tx_doneqid,	npe_txbuf, 0,  2,
			IX_QMGR_Q_SOURCE_ID_NOT_E, npe_txdone, sc);
		tx_doneqid = sc->tx_doneqid;
	}

	KASSERT(npes[sc->sc_npeid] == NULL,
	    ("npe %u already setup", sc->sc_npeid));
	npes[sc->sc_npeid] = sc;

	return 0;
}

static void
npe_deactivate(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);

	npes[sc->sc_npeid] = NULL;

	/* XXX disable q's */
	if (sc->sc_npe != NULL) {
		ixpnpe_stop(sc->sc_npe);
		ixpnpe_detach(sc->sc_npe);
	}
	if (sc->sc_stats != NULL) {
		bus_dmamap_unload(sc->sc_stats_tag, sc->sc_stats_map);
		bus_dmamem_free(sc->sc_stats_tag, sc->sc_stats,
			sc->sc_stats_map);
	}
	if (sc->sc_stats_tag != NULL)
		bus_dma_tag_destroy(sc->sc_stats_tag);
	npe_dma_destroy(sc, &sc->txdma);
	npe_dma_destroy(sc, &sc->rxdma);
	bus_generic_detach(sc->sc_dev);
	if (sc->sc_mii != NULL)
		device_delete_child(sc->sc_dev, sc->sc_mii);
}

/*
 * Change media according to request.
 */
static int
npe_ifmedia_update(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->sc_mii);
	NPE_LOCK(sc);
	mii_mediachg(mii);
	/* XXX push state ourself? */
	NPE_UNLOCK(sc);
	return (0);
}

/*
 * Notify the world which media we're using.
 */
static void
npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct npe_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->sc_mii);
	NPE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	NPE_UNLOCK(sc);
}

static void
npe_addstats(struct npe_softc *sc)
{
#define	NPEADD(x)	sc->sc_totals.x += be32toh(ns->x)
#define	MIBADD(x) do { sc->mibdata.x += be32toh(ns->x); NPEADD(x); } while (0)
	struct ifnet *ifp = sc->sc_ifp;
	struct npestats *ns = sc->sc_stats;

	MIBADD(dot3StatsAlignmentErrors);
	MIBADD(dot3StatsFCSErrors);
	MIBADD(dot3StatsInternalMacReceiveErrors);
	NPEADD(RxOverrunDiscards);
	NPEADD(RxLearnedEntryDiscards);
	NPEADD(RxLargeFramesDiscards);
	NPEADD(RxSTPBlockedDiscards);
	NPEADD(RxVLANTypeFilterDiscards);
	NPEADD(RxVLANIdFilterDiscards);
	NPEADD(RxInvalidSourceDiscards);
	NPEADD(RxBlackListDiscards);
	NPEADD(RxWhiteListDiscards);
	NPEADD(RxUnderflowEntryDiscards);
	MIBADD(dot3StatsSingleCollisionFrames);
	MIBADD(dot3StatsMultipleCollisionFrames);
	MIBADD(dot3StatsDeferredTransmissions);
	MIBADD(dot3StatsLateCollisions);
	MIBADD(dot3StatsExcessiveCollisions);
	MIBADD(dot3StatsInternalMacTransmitErrors);
	MIBADD(dot3StatsCarrierSenseErrors);
	NPEADD(TxLargeFrameDiscards);
	NPEADD(TxVLANIdFilterDiscards);

	sc->mibdata.dot3StatsFrameTooLongs +=
	      be32toh(ns->RxLargeFramesDiscards)
	    + be32toh(ns->TxLargeFrameDiscards);
	sc->mibdata.dot3StatsMissedFrames +=
	      be32toh(ns->RxOverrunDiscards)
	    + be32toh(ns->RxUnderflowEntryDiscards);

	ifp->if_oerrors +=
		  be32toh(ns->dot3StatsInternalMacTransmitErrors)
		+ be32toh(ns->dot3StatsCarrierSenseErrors)
		+ be32toh(ns->TxVLANIdFilterDiscards)
		;
	ifp->if_ierrors += be32toh(ns->dot3StatsFCSErrors)
		+ be32toh(ns->dot3StatsInternalMacReceiveErrors)
		+ be32toh(ns->RxOverrunDiscards)
		+ be32toh(ns->RxUnderflowEntryDiscards)
		;
	ifp->if_collisions +=
		  be32toh(ns->dot3StatsSingleCollisionFrames)
		+ be32toh(ns->dot3StatsMultipleCollisionFrames)
		;
#undef NPEADD
#undef MIBADD
}

static void
npe_tick(void *xsc)
{
#define	ACK	(NPE_RESETSTATS << NPE_MAC_MSGID_SHL)
	struct npe_softc *sc = xsc;
	struct mii_data *mii = device_get_softc(sc->sc_mii);
	uint32_t msg[2];

	NPE_ASSERT_LOCKED(sc);

	/*
	 * NB: to avoid sleeping with the softc lock held we
	 * split the NPE msg processing into two parts.  The
	 * request for statistics is sent w/o waiting for a
	 * reply and then on the next tick we retrieve the
	 * results.  This works because npe_tick is the only
	 * code that talks via the mailboxes (except at setup).
	 * This likely can be handled better.
	 */
	if (ixpnpe_recvmsg_async(sc->sc_npe, msg) == 0 && msg[0] == ACK) {
		bus_dmamap_sync(sc->sc_stats_tag, sc->sc_stats_map,
		    BUS_DMASYNC_POSTREAD);
		npe_addstats(sc);
	}
	npe_updatestats(sc);
	mii_tick(mii);

	npewatchdog(sc);

	/* schedule next poll */
	callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc);
#undef ACK
}

static void
npe_setmac(struct npe_softc *sc, u_char *eaddr)
{
	WR4(sc, NPE_MAC_UNI_ADDR_1, eaddr[0]);
	WR4(sc, NPE_MAC_UNI_ADDR_2, eaddr[1]);
	WR4(sc, NPE_MAC_UNI_ADDR_3, eaddr[2]);
	WR4(sc, NPE_MAC_UNI_ADDR_4, eaddr[3]);
	WR4(sc, NPE_MAC_UNI_ADDR_5, eaddr[4]);
	WR4(sc, NPE_MAC_UNI_ADDR_6, eaddr[5]);
}

static void
npe_getmac(struct npe_softc *sc, u_char *eaddr)
{
	/* NB: the unicast address appears to be loaded from EEPROM on reset */
	eaddr[0] = RD4(sc, NPE_MAC_UNI_ADDR_1) & 0xff;
	eaddr[1] = RD4(sc, NPE_MAC_UNI_ADDR_2) & 0xff;
	eaddr[2] = RD4(sc, NPE_MAC_UNI_ADDR_3) & 0xff;
	eaddr[3] = RD4(sc, NPE_MAC_UNI_ADDR_4) & 0xff;
	eaddr[4] = RD4(sc, NPE_MAC_UNI_ADDR_5) & 0xff;
	eaddr[5] = RD4(sc, NPE_MAC_UNI_ADDR_6) & 0xff;
}

struct txdone {
	struct npebuf *head;
	struct npebuf **tail;
	int count;
};

static __inline void
npe_txdone_finish(struct npe_softc *sc, const struct txdone *td)
{
	struct ifnet *ifp = sc->sc_ifp;

	NPE_LOCK(sc);
	*td->tail = sc->tx_free;
	sc->tx_free = td->head;
	/*
	 * We're no longer busy, so clear the busy flag and call the
	 * start routine to xmit more packets.
	 */
	ifp->if_opackets += td->count;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->npe_watchdog_timer = 0;
	npestart_locked(ifp);
	NPE_UNLOCK(sc);
}

/*
 * Q manager callback on tx done queue.  Reap mbufs
 * and return tx buffers to the free list.  Finally
 * restart output.  Note the microcode has only one
 * txdone q wired into it so we must use the NPE ID
 * returned with each npehwbuf to decide where to
 * send buffers.
 */
static void
npe_txdone(int qid, void *arg)
{
#define	P2V(a, dma) \
	&(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
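	/*
	 * P2V maps the physical address carried in a queue entry back
	 * to its s/w npebuf; this works because npe_dma_setup allocates
	 * the h/w and s/w arrays 1-1.  E.g. (hypothetical numbers) with
	 * buf_phys 0x100000, an entry address of
	 * 0x100000 + 2*sizeof(struct npehwbuf) yields &(dma)->buf[2].
	 */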
	struct npe_softc *sc0 = arg;
	struct npe_softc *sc;
	struct npebuf *npe;
	struct txdone *td, q[NPE_MAX];
	uint32_t entry;

	/* XXX no NPE-A support */
	q[NPE_B].tail = &q[NPE_B].head; q[NPE_B].count = 0;
	q[NPE_C].tail = &q[NPE_C].head; q[NPE_C].count = 0;
	/* XXX max # at a time? */
	while (ixpqmgr_qread(qid, &entry) == 0) {
		DPRINTF(sc0, "%s: entry 0x%x NPE %u port %u\n",
		    __func__, entry, NPE_QM_Q_NPE(entry), NPE_QM_Q_PORT(entry));

		sc = npes[NPE_QM_Q_NPE(entry)];
		npe = P2V(NPE_QM_Q_ADDR(entry), &sc->txdma);
		m_freem(npe->ix_m);
		npe->ix_m = NULL;

		td = &q[NPE_QM_Q_NPE(entry)];
		*td->tail = npe;
		td->tail = &npe->ix_next;
		td->count++;
	}

	if (q[NPE_B].count)
		npe_txdone_finish(npes[NPE_B], &q[NPE_B]);
	if (q[NPE_C].count)
		npe_txdone_finish(npes[NPE_C], &q[NPE_C]);
#undef P2V
}

static int
npe_rxbuf_init(struct npe_softc *sc, struct npebuf *npe, struct mbuf *m)
{
	bus_dma_segment_t segs[1];
	struct npedma *dma = &sc->rxdma;
	struct npehwbuf *hw;
	int error, nseg;

	if (m == NULL) {
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			return ENOBUFS;
	}
	KASSERT(m->m_ext.ext_size >= 1536 + ETHER_ALIGN,
		("ext_size %d", m->m_ext.ext_size));
	m->m_pkthdr.len = m->m_len = 1536;
	/* backload payload and align ip hdr */
	m->m_data = m->m_ext.ext_buf + (m->m_ext.ext_size - (1536+ETHER_ALIGN));
	error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map, m,
			segs, &nseg, 0);
	if (error != 0) {
		m_freem(m);
		return error;
	}
	hw = npe->ix_hw;
	hw->ix_ne[0].data = htobe32(segs[0].ds_addr);
	/* NB: NPE requires length be a multiple of 64 */
	/* NB: buffer length is shifted in word */
	hw->ix_ne[0].len = htobe32(segs[0].ds_len << 16);
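	/*
	 * Layout of the (big-endian) ix_ne[0].len word: the buffer
	 * length sits in the upper 16 bits; on completion the NPE
	 * writes the frame length into the lower 16 bits, which is
	 * what npe_rxdone masks with 0xffff.  E.g. a 1536-byte buffer
	 * is stored as htobe32(1536 << 16).
	 */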
	hw->ix_ne[0].next = 0;
	npe->ix_m = m;
	/* Flush the memory in the mbuf */
	bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREREAD);
	return 0;
}

/*
 * RX q processing for a specific NPE.  Claim entries
 * from the hardware queue and pass the frames up the
 * stack. Pass the rx buffers to the free list.
 */
static void
npe_rxdone(int qid, void *arg)
{
#define	P2V(a, dma) \
	&(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
	struct npe_softc *sc = arg;
	struct npedma *dma = &sc->rxdma;
	uint32_t entry;

	while (ixpqmgr_qread(qid, &entry) == 0) {
		struct npebuf *npe = P2V(NPE_QM_Q_ADDR(entry), dma);
		struct mbuf *m;

		DPRINTF(sc, "%s: entry 0x%x neaddr 0x%x ne_len 0x%x\n",
		    __func__, entry, npe->ix_neaddr, npe->ix_hw->ix_ne[0].len);
		/*
		 * Allocate a new mbuf to replenish the rx buffer.
		 * If doing so fails we drop the rx'd frame so we
		 * can reuse the previous mbuf.  When we're able to
		 * allocate a new mbuf dispatch the mbuf w/ rx'd
		 * data up the stack and replace it with the newly
		 * allocated one.
		 */
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m != NULL) {
			struct mbuf *mrx = npe->ix_m;
			struct npehwbuf *hw = npe->ix_hw;
			struct ifnet *ifp = sc->sc_ifp;

			/* Flush mbuf memory for rx'd data */
			bus_dmamap_sync(dma->mtag, npe->ix_map,
			    BUS_DMASYNC_POSTREAD);

			/* XXX flush hw buffer; works now 'cuz coherent */
			/* set m_len etc. per rx frame size */
			mrx->m_len = be32toh(hw->ix_ne[0].len) & 0xffff;
			mrx->m_pkthdr.len = mrx->m_len;
			mrx->m_pkthdr.rcvif = ifp;

			ifp->if_ipackets++;
			ifp->if_input(ifp, mrx);
		} else {
			/* discard frame and re-use mbuf */
			m = npe->ix_m;
		}
		if (npe_rxbuf_init(sc, npe, m) == 0) {
			/* return npe buf to rx free list */
			ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
		} else {
			/* XXX should not happen */
		}
	}
#undef P2V
}

#ifdef DEVICE_POLLING
static void
npe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct npe_softc *sc = ifp->if_softc;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		npe_rxdone(sc->rx_qid, sc);
		npe_txdone(sc->tx_doneqid, sc);	/* XXX polls both NPE's */
	}
}
#endif /* DEVICE_POLLING */

static void
npe_startxmit(struct npe_softc *sc)
{
	struct npedma *dma = &sc->txdma;
	int i;

	NPE_ASSERT_LOCKED(sc);
	sc->tx_free = NULL;
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];
		if (npe->ix_m != NULL) {
			/* NB: should not happen */
			device_printf(sc->sc_dev,
			    "%s: free mbuf at entry %u\n", __func__, i);
			m_freem(npe->ix_m);
		}
		npe->ix_m = NULL;
		npe->ix_next = sc->tx_free;
		sc->tx_free = npe;
	}
}

static void
npe_startrecv(struct npe_softc *sc)
{
	struct npedma *dma = &sc->rxdma;
	struct npebuf *npe;
	int i;

	NPE_ASSERT_LOCKED(sc);
	for (i = 0; i < dma->nbuf; i++) {
		npe = &dma->buf[i];
		npe_rxbuf_init(sc, npe, npe->ix_m);
		/* set npe buf on rx free list */
		ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
	}
}

/*
 * Reset and initialize the chip
 */
static void
npeinit_locked(void *xsc)
{
	struct npe_softc *sc = xsc;
	struct ifnet *ifp = sc->sc_ifp;

	NPE_ASSERT_LOCKED(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)	/* XXX */
		return;

	/*
	 * Reset MAC core.
	 */
	npe_mac_reset(sc);

	/* disable transmitter and receiver in the MAC */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);

	/*
	 * Set the MAC core registers.
	 */
	WR4(sc, NPE_MAC_INT_CLK_THRESH, 0x1);	/* clock ratio: for ixp4xx */
	WR4(sc, NPE_MAC_TX_CNTRL2,	0xf);	/* max retries */
	WR4(sc, NPE_MAC_RANDOM_SEED,	0x8);	/* LFSR back-off seed */
	/* thresholds determined by NPE firmware FS */
	WR4(sc, NPE_MAC_THRESH_P_EMPTY,	0x12);
	WR4(sc, NPE_MAC_THRESH_P_FULL,	0x30);
	WR4(sc, NPE_MAC_BUF_SIZE_TX,	0x8);	/* tx fifo threshold (bytes) */
	WR4(sc, NPE_MAC_TX_DEFER,	0x15);	/* for single deferral */
	WR4(sc, NPE_MAC_RX_DEFER,	0x16);	/* deferral on inter-frame gap*/
	WR4(sc, NPE_MAC_TX_TWO_DEFER_1,	0x8);	/* for 2-part deferral */
	WR4(sc, NPE_MAC_TX_TWO_DEFER_2,	0x7);	/* for 2-part deferral */
	WR4(sc, NPE_MAC_SLOT_TIME,	0x80);	/* assumes MII mode */

	WR4(sc, NPE_MAC_TX_CNTRL1,
		  NPE_TX_CNTRL1_RETRY		/* retry failed xmits */
		| NPE_TX_CNTRL1_FCS_EN		/* append FCS */
		| NPE_TX_CNTRL1_2DEFER		/* 2-part deferral */
		| NPE_TX_CNTRL1_PAD_EN);	/* pad runt frames */
	/* XXX pad strip? */
	/* ena pause frame handling */
	WR4(sc, NPE_MAC_RX_CNTRL1, NPE_RX_CNTRL1_PAUSE_EN);
	WR4(sc, NPE_MAC_RX_CNTRL2, 0);

	npe_setmac(sc, IF_LLADDR(ifp));
	npe_setmcast(sc);

	npe_startxmit(sc);
	npe_startrecv(sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->npe_watchdog_timer = 0;		/* just in case */

	/* enable transmitter and receiver in the MAC */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) | NPE_RX_CNTRL1_RX_EN);
	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) | NPE_TX_CNTRL1_TX_EN);

	callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc);
}

static void
npeinit(void *xsc)
{
	struct npe_softc *sc = xsc;
	NPE_LOCK(sc);
	npeinit_locked(sc);
	NPE_UNLOCK(sc);
}

/*
 * Dequeue packets and place on the h/w transmit queue.
 */
static void
npestart_locked(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;
	struct npebuf *npe;
	struct npehwbuf *hw;
	struct mbuf *m, *n;
	struct npedma *dma = &sc->txdma;
	bus_dma_segment_t segs[NPE_MAXSEG];
	int nseg, len, error, i;
	uint32_t next;

	NPE_ASSERT_LOCKED(sc);
	/* XXX can this happen? */
	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	while (sc->tx_free != NULL) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			/* XXX? */
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
			return;
		}
		npe = sc->tx_free;
		error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map,
		    m, segs, &nseg, 0);
		if (error == EFBIG) {
			n = m_collapse(m, M_DONTWAIT, NPE_MAXSEG);
			if (n == NULL) {
				if_printf(ifp, "%s: too many fragments %u\n",
				    __func__, nseg);
				m_freem(m);
				return;	/* XXX? */
			}
			m = n;
			error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map,
			    m, segs, &nseg, 0);
		}
		if (error != 0 || nseg == 0) {
			if_printf(ifp, "%s: error %u nseg %u\n",
			    __func__, error, nseg);
			m_freem(m);
			return;	/* XXX? */
		}
		sc->tx_free = npe->ix_next;

		bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREWRITE);

		/*
		 * Tap off here if there is a bpf listener.
		 */
		BPF_MTAP(ifp, m);

		npe->ix_m = m;
		hw = npe->ix_hw;
		len = m->m_pkthdr.len;
		next = npe->ix_neaddr + sizeof(hw->ix_ne[0]);
		for (i = 0; i < nseg; i++) {
			hw->ix_ne[i].data = htobe32(segs[i].ds_addr);
			hw->ix_ne[i].len = htobe32((segs[i].ds_len<<16) | len);
			hw->ix_ne[i].next = htobe32(next);

			len = 0;		/* zero for segments > 1 */
			next += sizeof(hw->ix_ne[0]);
		}
		hw->ix_ne[i-1].next = 0;	/* zero last in chain */
		/* XXX flush descriptor instead of using uncached memory */

		DPRINTF(sc, "%s: qwrite(%u, 0x%x) ne_data %x ne_len 0x%x\n",
		    __func__, sc->tx_qid, npe->ix_neaddr,
		    hw->ix_ne[0].data, hw->ix_ne[0].len);
		/* stick it on the tx q */
		/* XXX add vlan priority */
		ixpqmgr_qwrite(sc->tx_qid, npe->ix_neaddr);

		sc->npe_watchdog_timer = 5;
	}
	if (sc->tx_free == NULL)
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
}

static void
npestart(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;
	NPE_LOCK(sc);
	npestart_locked(ifp);
	NPE_UNLOCK(sc);
}

static void
npe_stopxmit(struct npe_softc *sc)
{
	struct npedma *dma = &sc->txdma;
	int i;

	NPE_ASSERT_LOCKED(sc);

	/* XXX qmgr */
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];

		if (npe->ix_m != NULL) {
			bus_dmamap_unload(dma->mtag, npe->ix_map);
			m_freem(npe->ix_m);
			npe->ix_m = NULL;
		}
	}
}

static void
npe_stoprecv(struct npe_softc *sc)
{
	struct npedma *dma = &sc->rxdma;
	int i;

	NPE_ASSERT_LOCKED(sc);

	/* XXX qmgr */
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];

		if (npe->ix_m != NULL) {
			bus_dmamap_unload(dma->mtag, npe->ix_map);
			m_freem(npe->ix_m);
			npe->ix_m = NULL;
		}
	}
}

/*
 * Turn off interrupts, and stop the nic.
 */
static void
npestop(struct npe_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* disable transmitter and receiver in the MAC */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);

	sc->npe_watchdog_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	callout_stop(&sc->tick_ch);

	npe_stopxmit(sc);
	npe_stoprecv(sc);
	/* XXX go into loopback & drain q's? */
	/* XXX but beware of disabling tx above */

	/*
	 * The MAC core rx/tx disable may leave the MAC hardware in an
	 * unpredictable state. A hw reset is executed before resetting
	 * all the MAC parameters to a known value.
	 */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
	DELAY(NPE_MAC_RESET_DELAY);
	WR4(sc, NPE_MAC_INT_CLK_THRESH, NPE_MAC_INT_CLK_THRESH_DEFAULT);
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);
}

static void
npewatchdog(struct npe_softc *sc)
{
	NPE_ASSERT_LOCKED(sc);

	if (sc->npe_watchdog_timer == 0 || --sc->npe_watchdog_timer != 0)
		return;

	device_printf(sc->sc_dev, "watchdog timeout\n");
	sc->sc_ifp->if_oerrors++;

	npeinit_locked(sc);
}

static int
npeioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct npe_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0;
#ifdef DEVICE_POLLING
	int mask;
#endif

	switch (cmd) {
	case SIOCSIFFLAGS:
		NPE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			npestop(sc);
		} else {
			/* reinitialize card on any parameter change */
			npeinit_locked(sc);
		}
		NPE_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* update multicast filter list. */
		NPE_LOCK(sc);
		npe_setmcast(sc);
		NPE_UNLOCK(sc);
		error = 0;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_mii);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

#ifdef DEVICE_POLLING
	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(npe_poll, ifp);
				if (error)
					return error;
				NPE_LOCK(sc);
				/* disable callbacks XXX txdone is shared */
				ixpqmgr_notify_disable(sc->rx_qid);
				ixpqmgr_notify_disable(sc->tx_doneqid);
				ifp->if_capenable |= IFCAP_POLLING;
				NPE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* NB: always enable qmgr callbacks */
				NPE_LOCK(sc);
				/* enable qmgr callbacks */
				ixpqmgr_notify_enable(sc->rx_qid,
				    IX_QMGR_Q_SOURCE_ID_NOT_E);
				ixpqmgr_notify_enable(sc->tx_doneqid,
				    IX_QMGR_Q_SOURCE_ID_NOT_E);
				ifp->if_capenable &= ~IFCAP_POLLING;
				NPE_UNLOCK(sc);
			}
		}
		break;
#endif
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}

/*
 * Setup a traffic class -> rx queue mapping.
 */
static int
npe_setrxqosentry(struct npe_softc *sc, int classix, int trafclass, int qid)
{
	uint32_t msg[2];

	msg[0] = (NPE_SETRXQOSENTRY << 24) | (sc->sc_npeid << 20) | classix;
	msg[1] = (trafclass << 24) | (1 << 23) | (qid << 16) | (qid << 4);
	return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
}
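
/*
 * A sketch of the message layout used by npe_setrxqosentry above
 * (inferred from the shifts, not from firmware documentation):
 * byte 3 of msg[0] holds the NPE_SETRXQOSENTRY opcode, bits 20-23
 * the NPE id, and the low byte the class index; msg[1] carries the
 * traffic class in byte 3 and the target queue id at bit offsets
 * 16 and 4.
 */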

static int
npe_setfirewallmode(struct npe_softc *sc, int onoff)
{
	uint32_t msg[2];

	/* XXX honor onoff */
	msg[0] = (NPE_SETFIREWALLMODE << 24) | (sc->sc_npeid << 20);
	msg[1] = 0;
	return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
}

/*
 * Update and reset the statistics in the NPE.
 */
static int
npe_updatestats(struct npe_softc *sc)
{
	uint32_t msg[2];

	msg[0] = NPE_RESETSTATS << NPE_MAC_MSGID_SHL;
	msg[1] = sc->sc_stats_phys;	/* physical address of stat block */
	return ixpnpe_sendmsg_async(sc->sc_npe, msg);
}

#if 0
/*
 * Get the current statistics block.
 */
static int
npe_getstats(struct npe_softc *sc)
{
	uint32_t msg[2];

	msg[0] = NPE_GETSTATS << NPE_MAC_MSGID_SHL;
	msg[1] = sc->sc_stats_phys;	/* physical address of stat block */
	return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
}

/*
 * Query the image id of the loaded firmware.
 */
static uint32_t
npe_getimageid(struct npe_softc *sc)
{
	uint32_t msg[2];

	msg[0] = NPE_GETSTATUS << NPE_MAC_MSGID_SHL;
	msg[1] = 0;
	return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg) == 0 ? msg[1] : 0;
}

/*
 * Enable/disable loopback.
 */
static int
npe_setloopback(struct npe_softc *sc, int ena)
{
	uint32_t msg[2];

	msg[0] = (NPE_SETLOOPBACK << NPE_MAC_MSGID_SHL) | (ena != 0);
	msg[1] = 0;
	return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
}
#endif

static void
npe_child_detached(device_t dev, device_t child)
{
	struct npe_softc *sc;

	sc = device_get_softc(dev);
	if (child == sc->sc_mii)
		sc->sc_mii = NULL;
}

/*
 * MII bus support routines.
 */
#define	MII_RD4(sc, reg)	bus_space_read_4(sc->sc_iot, sc->sc_miih, reg)
#define	MII_WR4(sc, reg, v) \
	bus_space_write_4(sc->sc_iot, sc->sc_miih, reg, v)

static uint32_t
npe_mii_mdio_read(struct npe_softc *sc, int reg)
{
	uint32_t v;

	/* NB: registers are known to be sequential */
	v =  (MII_RD4(sc, reg+0) & 0xff) << 0;
	v |= (MII_RD4(sc, reg+4) & 0xff) << 8;
	v |= (MII_RD4(sc, reg+8) & 0xff) << 16;
	v |= (MII_RD4(sc, reg+12) & 0xff) << 24;
	return v;
}

static void
npe_mii_mdio_write(struct npe_softc *sc, int reg, uint32_t cmd)
{
	/* NB: registers are known to be sequential */
	MII_WR4(sc, reg+0, cmd & 0xff);
	MII_WR4(sc, reg+4, (cmd >> 8) & 0xff);
	MII_WR4(sc, reg+8, (cmd >> 16) & 0xff);
	MII_WR4(sc, reg+12, (cmd >> 24) & 0xff);
}
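
/*
 * Byte-lane example (a sketch; the value is hypothetical): writing
 * the 16-bit value 0x8021 with npe_mii_mdio_write stores 0x21 at
 * reg+0, 0x80 at reg+4, and zeros at reg+8/reg+12; npe_mii_mdio_read
 * reassembles the same value from the low byte of each of the four
 * sequential 32-bit registers.
 */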

static int
npe_mii_mdio_wait(struct npe_softc *sc)
{
	uint32_t v;
	int i;

	/* NB: typically this takes 25-30 trips */
	for (i = 0; i < 1000; i++) {
		v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_CMD);
		if ((v & NPE_MII_GO) == 0)
			return 1;
		DELAY(1);
	}
	device_printf(sc->sc_dev, "%s: timeout after ~1ms, cmd 0x%x\n",
	    __func__, v);
	return 0;		/* NB: timeout */
}

static int
npe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct npe_softc *sc = device_get_softc(dev);
	uint32_t v;

	if (phy != sc->sc_phy)		/* XXX no auto-detect */
		return 0xffff;
	v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL) | NPE_MII_GO;
	npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
	if (npe_mii_mdio_wait(sc))
		v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_STS);
	else
		v = 0xffff | NPE_MII_READ_FAIL;
	return (v & NPE_MII_READ_FAIL) ? 0xffff : (v & 0xffff);
}

static void
npe_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct npe_softc *sc = device_get_softc(dev);
	uint32_t v;

	if (phy != sc->sc_phy)		/* XXX */
		return;
	v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL)
	  | data | NPE_MII_WRITE
	  | NPE_MII_GO;
	npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
	/* XXX complain about timeout */
	(void) npe_mii_mdio_wait(sc);
}

static void
npe_miibus_statchg(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->sc_mii);
	uint32_t tx1, rx1;

	/* sync MAC duplex state */
	tx1 = RD4(sc, NPE_MAC_TX_CNTRL1);
	rx1 = RD4(sc, NPE_MAC_RX_CNTRL1);
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		tx1 &= ~NPE_TX_CNTRL1_DUPLEX;
		rx1 |= NPE_RX_CNTRL1_PAUSE_EN;
	} else {
		tx1 |= NPE_TX_CNTRL1_DUPLEX;
		rx1 &= ~NPE_RX_CNTRL1_PAUSE_EN;
	}
	WR4(sc, NPE_MAC_RX_CNTRL1, rx1);
	WR4(sc, NPE_MAC_TX_CNTRL1, tx1);
}

static device_method_t npe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		npe_probe),
	DEVMETHOD(device_attach,	npe_attach),
	DEVMETHOD(device_detach,	npe_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_detached,	npe_child_detached),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	npe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	npe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	npe_miibus_statchg),

	{ 0, 0 }
};

static driver_t npe_driver = {
	"npe",
	npe_methods,
	sizeof(struct npe_softc),
};

DRIVER_MODULE(npe, ixp, npe_driver, npe_devclass, 0, 0);
DRIVER_MODULE(miibus, npe, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(npe, ixpqmgr, 1, 1, 1);
MODULE_DEPEND(npe, miibus, 1, 1, 1);
MODULE_DEPEND(npe, ether, 1, 1, 1);