/*-
 * Copyright (c) 2006-2008 Sam Leffler.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/xscale/ixp425/if_npe.c 189642 2009-03-10 19:18:11Z sam $");

/*
 * Intel XScale NPE Ethernet driver.
 *
 * This driver handles the two ports present on the IXP425.
 * Packet processing is done by the Network Processing Engines
 * (NPE's) that work together with a MAC and PHY. The MAC
 * is also mapped to the XScale cpu; the PHY is accessed via
 * the MAC. NPE-XScale communication happens through h/w
 * queues managed by the Q Manager block.
 *
 * The code here replaces the ethAcc, ethMii, and ethDB classes
 * in the Intel Access Library (IAL) and the OS-specific driver.
 *
 * XXX add vlan support
 */
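/*
 * Datapath summary (see npe_activate for the queue setup): the
 * driver posts empty mbufs on the rx free queue and descriptor
 * chains on the tx queue; the q manager invokes the npe_rxdone and
 * npe_txdone callbacks as the NPE fills the rx and tx done queues.
 */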
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <machine/bus.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_mib.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <arm/xscale/ixp425/ixp425reg.h>
#include <arm/xscale/ixp425/ixp425var.h>
#include <arm/xscale/ixp425/ixp425_qmgr.h>
#include <arm/xscale/ixp425/ixp425_npevar.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <arm/xscale/ixp425/if_npereg.h>

#include <machine/armreg.h>

#include "miibus_if.h"

/*
 * XXX: For the main bus dma tag. Can go away if the new method to get the
 * dma tag from the parent got MFC'd into RELENG_6.
 */
extern struct ixp425_softc *ixp425_softc;

struct npebuf {
	struct npebuf	*ix_next;	/* chain to next buffer */
	void		*ix_m;		/* backpointer to mbuf */
	bus_dmamap_t	ix_map;		/* bus dma map for associated data */
	struct npehwbuf	*ix_hw;		/* associated h/w block */
	uint32_t	ix_neaddr;	/* phys address of ix_hw */
};

struct npedma {
	const char*	name;
	int		nbuf;		/* # npebuf's allocated */
	bus_dma_tag_t	mtag;		/* bus dma tag for mbuf data */
	struct npehwbuf	*hwbuf;		/* NPE h/w buffers */
	bus_dma_tag_t	buf_tag;	/* tag+map for NPE buffers */
	bus_dmamap_t	buf_map;
	bus_addr_t	buf_phys;	/* phys addr of buffers */
	struct npebuf	*buf;		/* s/w buffers (1-1 w/ h/w) */
};

struct npe_softc {
	/* XXX mii requires this be first; do not move! */
	struct ifnet	*sc_ifp;	/* ifnet pointer */
	struct mtx	sc_mtx;		/* basically a perimeter lock */
	device_t	sc_dev;
	bus_space_tag_t	sc_iot;
	bus_space_handle_t sc_ioh;	/* MAC register window */
	device_t	sc_mii;		/* child miibus */
	bus_space_handle_t sc_miih;	/* MII register window */
	int		sc_npeid;
	struct ixpnpe_softc *sc_npe;	/* NPE support */
	int		sc_debug;	/* DPRINTF* control */
	int		sc_tickinterval;
	struct callout	tick_ch;	/* Tick callout */
	int		npe_watchdog_timer;
	struct npedma	txdma;
	struct npebuf	*tx_free;	/* list of free tx buffers */
	struct npedma	rxdma;
	bus_addr_t	buf_phys;	/* XXX for returning a value */
	int		rx_qid;		/* rx qid */
	int		rx_freeqid;	/* rx free buffers qid */
	int		tx_qid;		/* tx qid */
	int		tx_doneqid;	/* tx completed qid */
	int		sc_phy;		/* PHY id */
	struct ifmib_iso_8802_3 mibdata;
	bus_dma_tag_t	sc_stats_tag;	/* bus dma tag for stats block */
	struct npestats	*sc_stats;
	bus_dmamap_t	sc_stats_map;
	bus_addr_t	sc_stats_phys;	/* phys addr of sc_stats */
};
/*
 * Static configuration for IXP425.  The tx and
 * rx free Q id's are fixed by the NPE microcode.  The
 * rx Q id's are programmed to be separate to simplify
 * multi-port processing.  It may be better to handle
 * all traffic through one Q (as done by the Intel drivers).
 *
 * Note that the PHY's are accessible only through MAC C's
 * MII interface (hence miibase is the same for all ports).
 * This and other platform-specific assumptions probably
 * need to be handled through hints.
 */
static const struct {
	uint32_t	macbase;
	uint32_t	miibase;
	int		phy;		/* phy id */
	uint8_t		rx_qid;
	uint8_t		rx_freeqid;
	uint8_t		tx_qid;
	uint8_t		tx_doneqid;
} npeconfig[NPE_MAX] = {
	[NPE_A] = {
	  .macbase	= IXP435_MAC_A_HWBASE,
	  .miibase	= IXP425_MAC_C_HWBASE,
	  .phy		= 2,
	  .rx_qid	= 4,
	  .rx_freeqid	= 26,
	  .tx_qid	= 23,
	  .tx_doneqid	= 31
	},
	[NPE_B] = {
	  .macbase	= IXP425_MAC_B_HWBASE,
	  .miibase	= IXP425_MAC_C_HWBASE,
	  .phy		= 0,
	  .rx_qid	= 4,
	  .rx_freeqid	= 27,
	  .tx_qid	= 24,
	  .tx_doneqid	= 31
	},
	[NPE_C] = {
	  .macbase	= IXP425_MAC_C_HWBASE,
	  .miibase	= IXP425_MAC_C_HWBASE,
	  .phy		= 1,
	  .rx_qid	= 12,
	  .rx_freeqid	= 28,
	  .tx_qid	= 25,
	  .tx_doneqid	= 31
	},
};
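/*
 * NB: the static table above can be overridden with loader hints
 * read by the override_* helpers below; e.g. (hypothetical values)
 * a board with different wiring might set
 *	hint.npe.0.mac="B"
 *	hint.npe.0.mii="C"
 *	hint.npe.0.phy="5"
 */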
static struct npe_softc *npes[NPE_MAX];	/* NB: indexed by npeid */

static __inline uint32_t
RD4(struct npe_softc *sc, bus_size_t off)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
}

static __inline void
WR4(struct npe_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
}

#define NPE_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define	NPE_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define NPE_LOCK_INIT(_sc) \
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->sc_dev), \
	    MTX_NETWORK_LOCK, MTX_DEF)
#define NPE_LOCK_DESTROY(_sc)	mtx_destroy(&_sc->sc_mtx);
#define NPE_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define NPE_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);

static devclass_t npe_devclass;

static int	override_npeid(device_t, const char *resname, int *val);
static int	npe_activate(device_t dev);
static void	npe_deactivate(device_t dev);
static int	npe_ifmedia_update(struct ifnet *ifp);
static void	npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static void	npe_setmac(struct npe_softc *sc, u_char *eaddr);
static void	npe_getmac(struct npe_softc *sc, u_char *eaddr);
static void	npe_txdone(int qid, void *arg);
static int	npe_rxbuf_init(struct npe_softc *, struct npebuf *,
			struct mbuf *);
static void	npe_rxdone(int qid, void *arg);
static void	npeinit(void *);
static void	npestart_locked(struct ifnet *);
static void	npestart(struct ifnet *);
static void	npestop(struct npe_softc *);
static void	npewatchdog(struct npe_softc *);
static int	npeioctl(struct ifnet * ifp, u_long, caddr_t);

static int	npe_setrxqosentry(struct npe_softc *, int classix,
			int trafclass, int qid);
static int	npe_setfirewallmode(struct npe_softc *, int onoff);
static int	npe_updatestats(struct npe_softc *);
#if 0
static int	npe_getstats(struct npe_softc *);
static uint32_t	npe_getimageid(struct npe_softc *);
static int	npe_setloopback(struct npe_softc *, int ena);
#endif

/* NB: all tx done processing goes through one queue */
static int tx_doneqid = -1;

SYSCTL_NODE(_hw, OID_AUTO, npe, CTLFLAG_RD, 0, "IXP4XX NPE driver parameters");

static int npe_debug = 0;
SYSCTL_INT(_hw_npe, OID_AUTO, debug, CTLFLAG_RW, &npe_debug,
	   0, "IXP4XX NPE network interface debug msgs");
TUNABLE_INT("hw.npe.debug", &npe_debug);
#define	DPRINTF(sc, fmt, ...) do {					\
	if (sc->sc_debug) device_printf(sc->sc_dev, fmt, __VA_ARGS__);	\
} while (0)
#define	DPRINTFn(n, sc, fmt, ...) do {					\
	if (sc->sc_debug >= n) device_printf(sc->sc_dev, fmt, __VA_ARGS__);\
} while (0)
static int npe_tickinterval = 3;		/* npe_tick frequency (secs) */
SYSCTL_INT(_hw_npe, OID_AUTO, tickinterval, CTLFLAG_RD, &npe_tickinterval,
	    0, "periodic work interval (secs)");
TUNABLE_INT("hw.npe.tickinterval", &npe_tickinterval);

static	int npe_rxbuf = 64;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_npe, OID_AUTO, rxbuf, CTLFLAG_RD, &npe_rxbuf,
	    0, "rx buffers allocated");
TUNABLE_INT("hw.npe.rxbuf", &npe_rxbuf);
static	int npe_txbuf = 128;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_npe, OID_AUTO, txbuf, CTLFLAG_RD, &npe_txbuf,
	    0, "tx buffers allocated");
TUNABLE_INT("hw.npe.txbuf", &npe_txbuf);

static int
unit2npeid(int unit)
{
	static const int npeidmap[2][3] = {
		/* on 425 A is for HSS, B & C are for Ethernet */
		{ NPE_B, NPE_C, -1 },	/* IXP425 */
		/* 435 only has A & C, order C then A */
		{ NPE_C, NPE_A, -1 },	/* IXP435 */
	};
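	/*
	 * e.g. unit 0 maps to NPE-B on an IXP425 but to NPE-C on an
	 * IXP435; unit 1 maps to NPE-C and NPE-A respectively.
	 */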
	/* XXX check feature register instead */
	return (unit < 3 ? npeidmap[
	    (cpu_id() & CPU_ID_CPU_MASK) == CPU_ID_IXP435][unit] : -1);
}

static int
npe_probe(device_t dev)
{
	static const char *desc[NPE_MAX] = {
		[NPE_A] = "IXP NPE-A",
		[NPE_B] = "IXP NPE-B",
		[NPE_C] = "IXP NPE-C"
	};
	int unit = device_get_unit(dev);
	int npeid;

	if (unit > 2 ||
	    (ixp4xx_read_feature_bits() &
	     (unit == 0 ? EXP_FCTRL_ETH0 : EXP_FCTRL_ETH1)) == 0)
		return EINVAL;

	npeid = -1;
	if (!override_npeid(dev, "npeid", &npeid))
		npeid = unit2npeid(unit);
	if (npeid == -1) {
		device_printf(dev, "unit %d not supported\n", unit);
		return EINVAL;
	}
	device_set_desc(dev, desc[npeid]);
	return 0;
}

static int
npe_attach(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	struct ixp425_softc *sa = device_get_softc(device_get_parent(dev));
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct ifnet *ifp;
	int error;
	u_char eaddr[6];

	sc->sc_dev = dev;
	sc->sc_iot = sa->sc_iot;
	NPE_LOCK_INIT(sc);
	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
	sc->sc_debug = npe_debug;
	sc->sc_tickinterval = npe_tickinterval;

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet\n");
		error = EIO;		/* XXX */
		goto out;
	}
	/* NB: must be setup prior to invoking mii code */
	sc->sc_ifp = ifp;

	error = npe_activate(dev);
	if (error) {
		device_printf(dev, "cannot activate npe\n");
		goto out;
	}

	npe_getmac(sc, eaddr);

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = npestart;
	ifp->if_ioctl = npeioctl;
	ifp->if_init = npeinit;
	IFQ_SET_MAXLEN(&ifp->if_snd, sc->txdma.nbuf - 1);
	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_linkmib = &sc->mibdata;
	ifp->if_linkmiblen = sizeof(sc->mibdata);
	sc->mibdata.dot3Compliance = DOT3COMPLIANCE_STATS;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tickinterval",
	    CTLFLAG_RW, &sc->sc_tickinterval, 0, "periodic work frequency");

	ether_ifattach(ifp, eaddr);
	return 0;
out:
	if (ifp != NULL)
		if_free(ifp);
	NPE_LOCK_DESTROY(sc);
	npe_deactivate(dev);
	return error;
}

static int
npe_detach(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = sc->sc_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	npestop(sc);
	if (ifp != NULL) {
		ether_ifdetach(ifp);
		if_free(ifp);
	}
	NPE_LOCK_DESTROY(sc);
	npe_deactivate(dev);
	return 0;
}

/*
 * Compute and install the multicast filter.
 */
static void
npe_setmcast(struct npe_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	uint8_t mask[ETHER_ADDR_LEN], addr[ETHER_ADDR_LEN];
	int i;

	if (ifp->if_flags & IFF_PROMISC) {
		memset(mask, 0, ETHER_ADDR_LEN);
		memset(addr, 0, ETHER_ADDR_LEN);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		static const uint8_t allmulti[ETHER_ADDR_LEN] =
		    { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
		memcpy(mask, allmulti, ETHER_ADDR_LEN);
		memcpy(addr, allmulti, ETHER_ADDR_LEN);
	} else {
		uint8_t clr[ETHER_ADDR_LEN], set[ETHER_ADDR_LEN];
		struct ifmultiaddr *ifma;
		const uint8_t *mac;

		memset(clr, 0, ETHER_ADDR_LEN);
		memset(set, 0xff, ETHER_ADDR_LEN);

		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			mac = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				clr[i] |= mac[i];
				set[i] &= mac[i];
			}
		}
		IF_ADDR_UNLOCK(ifp);

		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			mask[i] = set[i] | ~clr[i];
			addr[i] = set[i];
		}
	}

	/*
	 * Write the mask and address registers.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		WR4(sc, NPE_MAC_ADDR_MASK(i), mask[i]);
		WR4(sc, NPE_MAC_ADDR(i), addr[i]);
	}
}

static void
npe_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct npe_softc *sc;

	if (error != 0)
		return;
	sc = (struct npe_softc *)arg;
	sc->buf_phys = segs[0].ds_addr;
}

static int
npe_dma_setup(struct npe_softc *sc, struct npedma *dma,
	const char *name, int nbuf, int maxseg)
{
	int error, i;

	memset(dma, 0, sizeof(*dma));

	dma->name = name;
	dma->nbuf = nbuf;

	/* DMA tag for mapped mbufs  */
	error = bus_dma_tag_create(ixp425_softc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES, maxseg, MCLBYTES, 0,
	    busdma_lock_mutex, &sc->sc_mtx, &dma->mtag);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to create %s mbuf dma tag, "
		     "error %u\n", dma->name, error);
		return error;
	}

	/* DMA tag and map for the NPE buffers */
	error = bus_dma_tag_create(ixp425_softc->sc_dmat, sizeof(uint32_t), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    nbuf * sizeof(struct npehwbuf), 1,
	    nbuf * sizeof(struct npehwbuf), 0,
	    busdma_lock_mutex, &sc->sc_mtx, &dma->buf_tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to create %s npebuf dma tag, error %u\n",
		    dma->name, error);
		return error;
	}
	/* XXX COHERENT for now */
	if (bus_dmamem_alloc(dma->buf_tag, (void **)&dma->hwbuf,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &dma->buf_map) != 0) {
		device_printf(sc->sc_dev,
		     "unable to allocate memory for %s h/w buffers\n",
		     dma->name);
		return ENOMEM;
	}
	/* XXX M_TEMP */
	dma->buf = malloc(nbuf * sizeof(struct npebuf), M_TEMP, M_NOWAIT | M_ZERO);
	if (dma->buf == NULL) {
		device_printf(sc->sc_dev,
		     "unable to allocate memory for %s s/w buffers\n",
		     dma->name);
		return ENOMEM;
	}
	error = bus_dmamap_load(dma->buf_tag, dma->buf_map,
	    dma->hwbuf, nbuf*sizeof(struct npehwbuf), npe_getaddr, sc, 0);
	if (error != 0) {
		device_printf(sc->sc_dev,
		     "unable to map memory for %s h/w buffers, error %u\n",
		     dma->name, error);
		return error;
	}
	dma->buf_phys = sc->buf_phys;
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];
		struct npehwbuf *hw = &dma->hwbuf[i];

		/* calculate offset to shared area */
		npe->ix_neaddr = dma->buf_phys +
			((uintptr_t)hw - (uintptr_t)dma->hwbuf);
		KASSERT((npe->ix_neaddr & 0x1f) == 0,
		    ("ixpbuf misaligned, PA 0x%x", npe->ix_neaddr));
		error = bus_dmamap_create(dma->mtag, BUS_DMA_NOWAIT,
				&npe->ix_map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			     "unable to create dmamap for %s buffer %u, "
			     "error %u\n", dma->name, i, error);
			return error;
		}
		npe->ix_hw = hw;
	}
	bus_dmamap_sync(dma->buf_tag, dma->buf_map, BUS_DMASYNC_PREWRITE);
	return 0;
}
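/*
 * NB: after npe_dma_setup each s/w npebuf is paired 1-1 with an
 * entry in the contiguous block of h/w buffers; ix_neaddr is the
 * physical address handed to the q manager and is mapped back to
 * the npebuf by the P2V macro in npe_txdone/npe_rxdone.
 */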

static void
npe_dma_destroy(struct npe_softc *sc, struct npedma *dma)
{
	int i;

	if (dma->hwbuf != NULL) {
		for (i = 0; i < dma->nbuf; i++) {
			struct npebuf *npe = &dma->buf[i];
			bus_dmamap_destroy(dma->mtag, npe->ix_map);
		}
		bus_dmamap_unload(dma->buf_tag, dma->buf_map);
		bus_dmamem_free(dma->buf_tag, dma->hwbuf, dma->buf_map);
	}
	if (dma->buf != NULL)
		free(dma->buf, M_TEMP);
	if (dma->buf_tag)
		bus_dma_tag_destroy(dma->buf_tag);
	if (dma->mtag)
		bus_dma_tag_destroy(dma->mtag);
	memset(dma, 0, sizeof(*dma));
}

static int
override_addr(device_t dev, const char *resname, int *base)
{
	int unit = device_get_unit(dev);
	const char *resval;

	/* XXX warn for wrong hint type */
	if (resource_string_value("npe", unit, resname, &resval) != 0)
		return 0;
	switch (resval[0]) {
	case 'A':
		*base = IXP435_MAC_A_HWBASE;
		break;
	case 'B':
		*base = IXP425_MAC_B_HWBASE;
		break;
	case 'C':
		*base = IXP425_MAC_C_HWBASE;
		break;
	default:
		device_printf(dev, "Warning, bad value %s for "
		    "npe.%d.%s ignored\n", resval, unit, resname);
		return 0;
	}
	if (bootverbose)
		device_printf(dev, "using npe.%d.%s=%s override\n",
		    unit, resname, resval);
	return 1;
}

static int
override_npeid(device_t dev, const char *resname, int *npeid)
{
	int unit = device_get_unit(dev);
	const char *resval;

	/* XXX warn for wrong hint type */
	if (resource_string_value("npe", unit, resname, &resval) != 0)
		return 0;
	switch (resval[0]) {
	case 'A': *npeid = NPE_A; break;
	case 'B': *npeid = NPE_B; break;
	case 'C': *npeid = NPE_C; break;
	default:
		device_printf(dev, "Warning, bad value %s for "
		    "npe.%d.%s ignored\n", resval, unit, resname);
		return 0;
	}
	if (bootverbose)
		device_printf(dev, "using npe.%d.%s=%s override\n",
		    unit, resname, resval);
	return 1;
}

static int
override_unit(device_t dev, const char *resname, int *val, int min, int max)
{
	int unit = device_get_unit(dev);
	int resval;

	if (resource_int_value("npe", unit, resname, &resval) != 0)
		return 0;
	if (!(min <= resval && resval <= max)) {
		device_printf(dev, "Warning, bad value %d for npe.%d.%s "
		    "ignored (value must be [%d-%d])\n", resval, unit,
		    resname, min, max);
		return 0;
	}
	if (bootverbose)
		device_printf(dev, "using npe.%d.%s=%d override\n",
		    unit, resname, resval);
	*val = resval;
	return 1;
}

static void
npe_mac_reset(struct npe_softc *sc)
{
	/*
	 * Reset MAC core.
	 */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
	DELAY(NPE_MAC_RESET_DELAY);
	/* configure MAC to generate MDC clock */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);
}

static int
npe_activate(device_t dev)
{
	struct npe_softc * sc = device_get_softc(dev);
	int error, i, macbase, miibase;

	/*
	 * Setup NPE ID, MAC, and MII bindings.  We allow override
	 * via hints to handle unexpected board configs.
	 */
	if (!override_npeid(dev, "npeid", &sc->sc_npeid))
		sc->sc_npeid = unit2npeid(device_get_unit(dev));
	sc->sc_npe = ixpnpe_attach(dev, sc->sc_npeid);
	if (sc->sc_npe == NULL) {
		device_printf(dev, "cannot attach ixpnpe\n");
		return EIO;		/* XXX */
	}

	/* MAC */
	if (!override_addr(dev, "mac", &macbase))
		macbase = npeconfig[sc->sc_npeid].macbase;
	device_printf(sc->sc_dev, "MAC at 0x%x\n", macbase);
	if (bus_space_map(sc->sc_iot, macbase, IXP425_REG_SIZE, 0, &sc->sc_ioh)) {
		device_printf(dev, "cannot map mac registers 0x%x:0x%x\n",
		    macbase, IXP425_REG_SIZE);
		return ENOMEM;
	}

	/* PHY */
	if (!override_unit(dev, "phy", &sc->sc_phy, 0, MII_NPHY-1))
		sc->sc_phy = npeconfig[sc->sc_npeid].phy;
	if (!override_addr(dev, "mii", &miibase))
		miibase = npeconfig[sc->sc_npeid].miibase;
	device_printf(sc->sc_dev, "MII at 0x%x\n", miibase);
	if (miibase != macbase) {
		/*
		 * PHY is mapped through a different MAC, setup an
		 * additional mapping for frobbing the PHY registers.
		 */
		if (bus_space_map(sc->sc_iot, miibase, IXP425_REG_SIZE, 0, &sc->sc_miih)) {
			device_printf(dev,
			    "cannot map MII registers 0x%x:0x%x\n",
			    miibase, IXP425_REG_SIZE);
			return ENOMEM;
		}
	} else
		sc->sc_miih = sc->sc_ioh;

	/*
	 * Load NPE firmware and start it running.
	 */
	error = ixpnpe_init(sc->sc_npe);
	if (error != 0) {
		device_printf(dev, "cannot init NPE (error %d)\n", error);
		return error;
	}

	/* probe for PHY */
	if (mii_phy_probe(dev, &sc->sc_mii, npe_ifmedia_update, npe_ifmedia_status)) {
		device_printf(dev, "cannot find PHY %d.\n", sc->sc_phy);
		return ENXIO;
	}

	error = npe_dma_setup(sc, &sc->txdma, "tx", npe_txbuf, NPE_MAXSEG);
	if (error != 0)
		return error;
	error = npe_dma_setup(sc, &sc->rxdma, "rx", npe_rxbuf, 1);
	if (error != 0)
		return error;

	/* setup statistics block */
	error = bus_dma_tag_create(ixp425_softc->sc_dmat, sizeof(uint32_t), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct npestats), 1, sizeof(struct npestats), 0,
	    busdma_lock_mutex, &sc->sc_mtx, &sc->sc_stats_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to create stats tag, "
		     "error %u\n", error);
		return error;
	}
	if (bus_dmamem_alloc(sc->sc_stats_tag, (void **)&sc->sc_stats,
	    BUS_DMA_NOWAIT, &sc->sc_stats_map) != 0) {
		device_printf(sc->sc_dev,
		     "unable to allocate memory for stats block\n");
		return ENOMEM;
	}
	error = bus_dmamap_load(sc->sc_stats_tag, sc->sc_stats_map,
	    sc->sc_stats, sizeof(struct npestats), npe_getaddr, sc, 0);
	if (error != 0) {
		device_printf(sc->sc_dev,
		     "unable to load memory for stats block, error %u\n",
		     error);
		return error;
	}
	sc->sc_stats_phys = sc->buf_phys;

	/*
	 * Setup h/w rx/tx queues.  There are four q's:
	 *   rx		inbound q of rx'd frames
	 *   rx_free	pool of ixpbuf's for receiving frames
	 *   tx		outbound q of frames to send
	 *   tx_done	q of tx frames that have been processed
	 *
	 * The NPE handles the actual tx/rx process and the q manager
	 * handles the queues.  The driver just writes entries to the
	 * q manager mailboxes and gets callbacks when there are rx'd
	 * frames to process or tx'd frames to reap.  These callbacks
	 * are controlled by the q configurations; e.g. we get a
	 * callback when tx_done has 2 or more frames to process and
	 * when the rx q has at least one frame.  These settings can
	 * be changed at the time the q is configured.
	 */
	sc->rx_qid = npeconfig[sc->sc_npeid].rx_qid;
	ixpqmgr_qconfig(sc->rx_qid, npe_rxbuf, 0,  1,
		IX_QMGR_Q_SOURCE_ID_NOT_E, npe_rxdone, sc);
	sc->rx_freeqid = npeconfig[sc->sc_npeid].rx_freeqid;
	ixpqmgr_qconfig(sc->rx_freeqid, npe_rxbuf, 0, npe_rxbuf/2, 0, NULL, sc);
	/*
	 * Setup the NPE to direct all traffic to rx_qid.
	 * When QoS is enabled in the firmware there are
	 * 8 traffic classes; otherwise just 4.
	 */
	for (i = 0; i < 8; i++)
		npe_setrxqosentry(sc, i, 0, sc->rx_qid);

	/* disable firewall mode just in case (should be off) */
	npe_setfirewallmode(sc, 0);

	sc->tx_qid = npeconfig[sc->sc_npeid].tx_qid;
	sc->tx_doneqid = npeconfig[sc->sc_npeid].tx_doneqid;
	ixpqmgr_qconfig(sc->tx_qid, npe_txbuf, 0, npe_txbuf, 0, NULL, sc);
	if (tx_doneqid == -1) {
		ixpqmgr_qconfig(sc->tx_doneqid, npe_txbuf, 0,  2,
			IX_QMGR_Q_SOURCE_ID_NOT_E, npe_txdone, sc);
		tx_doneqid = sc->tx_doneqid;
	}

	KASSERT(npes[sc->sc_npeid] == NULL,
	    ("npe %u already setup", sc->sc_npeid));
	npes[sc->sc_npeid] = sc;

	return 0;
}

static void
npe_deactivate(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);

	npes[sc->sc_npeid] = NULL;

	/* XXX disable q's */
	if (sc->sc_npe != NULL) {
		ixpnpe_stop(sc->sc_npe);
		ixpnpe_detach(sc->sc_npe);
	}
	if (sc->sc_stats != NULL) {
		bus_dmamap_unload(sc->sc_stats_tag, sc->sc_stats_map);
		bus_dmamem_free(sc->sc_stats_tag, sc->sc_stats,
			sc->sc_stats_map);
	}
	if (sc->sc_stats_tag != NULL)
		bus_dma_tag_destroy(sc->sc_stats_tag);
	npe_dma_destroy(sc, &sc->txdma);
	npe_dma_destroy(sc, &sc->rxdma);
	bus_generic_detach(sc->sc_dev);
	if (sc->sc_mii != NULL)
		device_delete_child(sc->sc_dev, sc->sc_mii);
}

/*
 * Change media according to request.
 */
static int
npe_ifmedia_update(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->sc_mii);
	NPE_LOCK(sc);
	mii_mediachg(mii);
	/* XXX push state ourself? */
	NPE_UNLOCK(sc);
	return (0);
}

/*
 * Notify the world which media we're using.
 */
static void
npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct npe_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->sc_mii);
	NPE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	NPE_UNLOCK(sc);
}

static void
npe_addstats(struct npe_softc *sc)
{
#define	MIBADD(x)	sc->mibdata.x += be32toh(ns->x)
	struct ifnet *ifp = sc->sc_ifp;
	struct npestats *ns = sc->sc_stats;

	MIBADD(dot3StatsAlignmentErrors);
	MIBADD(dot3StatsFCSErrors);
	MIBADD(dot3StatsSingleCollisionFrames);
	MIBADD(dot3StatsMultipleCollisionFrames);
	MIBADD(dot3StatsDeferredTransmissions);
	MIBADD(dot3StatsLateCollisions);
	MIBADD(dot3StatsExcessiveCollisions);
	MIBADD(dot3StatsInternalMacTransmitErrors);
	MIBADD(dot3StatsCarrierSenseErrors);
	sc->mibdata.dot3StatsFrameTooLongs +=
	      be32toh(ns->RxLargeFramesDiscards)
	    + be32toh(ns->TxLargeFrameDiscards);
	MIBADD(dot3StatsInternalMacReceiveErrors);
	sc->mibdata.dot3StatsMissedFrames +=
	      be32toh(ns->RxOverrunDiscards)
	    + be32toh(ns->RxUnderflowEntryDiscards);

	ifp->if_oerrors +=
		  be32toh(ns->dot3StatsInternalMacTransmitErrors)
		+ be32toh(ns->dot3StatsCarrierSenseErrors)
		+ be32toh(ns->TxVLANIdFilterDiscards)
		;
	ifp->if_ierrors += be32toh(ns->dot3StatsFCSErrors)
		+ be32toh(ns->dot3StatsInternalMacReceiveErrors)
		+ be32toh(ns->RxOverrunDiscards)
		+ be32toh(ns->RxUnderflowEntryDiscards)
		;
	ifp->if_collisions +=
		  be32toh(ns->dot3StatsSingleCollisionFrames)
		+ be32toh(ns->dot3StatsMultipleCollisionFrames)
		;
#undef MIBADD
}

static void
npe_tick(void *xsc)
{
#define	ACK	(NPE_RESETSTATS << NPE_MAC_MSGID_SHL)
	struct npe_softc *sc = xsc;
	struct mii_data *mii = device_get_softc(sc->sc_mii);
	uint32_t msg[2];

	NPE_ASSERT_LOCKED(sc);

	/*
	 * NB: to avoid sleeping with the softc lock held we
	 * split the NPE msg processing into two parts.  The
	 * request for statistics is sent w/o waiting for a
	 * reply and then on the next tick we retrieve the
	 * results.  This works because npe_tick is the only
	 * code that talks via the mailboxes (except at setup).
	 * This likely can be handled better.
	 */
	if (ixpnpe_recvmsg_async(sc->sc_npe, msg) == 0 && msg[0] == ACK) {
		bus_dmamap_sync(sc->sc_stats_tag, sc->sc_stats_map,
		    BUS_DMASYNC_POSTREAD);
		npe_addstats(sc);
	}
	npe_updatestats(sc);
	mii_tick(mii);

	npewatchdog(sc);

	/* schedule next poll */
	callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc);
#undef ACK
}

static void
npe_setmac(struct npe_softc *sc, u_char *eaddr)
{
	WR4(sc, NPE_MAC_UNI_ADDR_1, eaddr[0]);
	WR4(sc, NPE_MAC_UNI_ADDR_2, eaddr[1]);
	WR4(sc, NPE_MAC_UNI_ADDR_3, eaddr[2]);
	WR4(sc, NPE_MAC_UNI_ADDR_4, eaddr[3]);
	WR4(sc, NPE_MAC_UNI_ADDR_5, eaddr[4]);
	WR4(sc, NPE_MAC_UNI_ADDR_6, eaddr[5]);
}

static void
npe_getmac(struct npe_softc *sc, u_char *eaddr)
{
	/* NB: the unicast address appears to be loaded from EEPROM on reset */
	eaddr[0] = RD4(sc, NPE_MAC_UNI_ADDR_1) & 0xff;
	eaddr[1] = RD4(sc, NPE_MAC_UNI_ADDR_2) & 0xff;
	eaddr[2] = RD4(sc, NPE_MAC_UNI_ADDR_3) & 0xff;
	eaddr[3] = RD4(sc, NPE_MAC_UNI_ADDR_4) & 0xff;
	eaddr[4] = RD4(sc, NPE_MAC_UNI_ADDR_5) & 0xff;
	eaddr[5] = RD4(sc, NPE_MAC_UNI_ADDR_6) & 0xff;
}

struct txdone {
	struct npebuf *head;
	struct npebuf **tail;
	int count;
};

static __inline void
npe_txdone_finish(struct npe_softc *sc, const struct txdone *td)
{
	struct ifnet *ifp = sc->sc_ifp;

	NPE_LOCK(sc);
	*td->tail = sc->tx_free;
	sc->tx_free = td->head;
	/*
	 * We're no longer busy, so clear the busy flag and call the
	 * start routine to xmit more packets.
	 */
	ifp->if_opackets += td->count;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->npe_watchdog_timer = 0;
	npestart_locked(ifp);
	NPE_UNLOCK(sc);
}

/*
 * Q manager callback on tx done queue.  Reap mbufs
 * and return tx buffers to the free list.  Finally
 * restart output.  Note the microcode has only one
 * txdone q wired into it so we must use the NPE ID
 * returned with each npehwbuf to decide where to
 * send buffers.
 */
static void
npe_txdone(int qid, void *arg)
{
#define	P2V(a, dma) \
	&(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
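	/*
	 * NB: P2V maps a physical buffer address returned by the q
	 * manager back to its s/w npebuf: the h/w buffers are
	 * contiguous, so the offset from buf_phys indexes dma->buf.
	 */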
	struct npe_softc *sc0 = arg;
	struct npe_softc *sc;
	struct npebuf *npe;
	struct txdone *td, q[NPE_MAX];
	uint32_t entry;

	/* XXX no NPE-A support */
	q[NPE_B].tail = &q[NPE_B].head; q[NPE_B].count = 0;
	q[NPE_C].tail = &q[NPE_C].head; q[NPE_C].count = 0;
	/* XXX max # at a time? */
	while (ixpqmgr_qread(qid, &entry) == 0) {
		DPRINTF(sc0, "%s: entry 0x%x NPE %u port %u\n",
		    __func__, entry, NPE_QM_Q_NPE(entry), NPE_QM_Q_PORT(entry));

		sc = npes[NPE_QM_Q_NPE(entry)];
		npe = P2V(NPE_QM_Q_ADDR(entry), &sc->txdma);
		m_freem(npe->ix_m);
		npe->ix_m = NULL;

		td = &q[NPE_QM_Q_NPE(entry)];
		*td->tail = npe;
		td->tail = &npe->ix_next;
		td->count++;
	}

	if (q[NPE_B].count)
		npe_txdone_finish(npes[NPE_B], &q[NPE_B]);
	if (q[NPE_C].count)
		npe_txdone_finish(npes[NPE_C], &q[NPE_C]);
#undef P2V
}

static int
npe_rxbuf_init(struct npe_softc *sc, struct npebuf *npe, struct mbuf *m)
{
	bus_dma_segment_t segs[1];
	struct npedma *dma = &sc->rxdma;
	struct npehwbuf *hw;
	int error, nseg;

	if (m == NULL) {
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			return ENOBUFS;
	}
	KASSERT(m->m_ext.ext_size >= 1536 + ETHER_ALIGN,
		("ext_size %d", m->m_ext.ext_size));
	m->m_pkthdr.len = m->m_len = 1536;
	/* backload payload and align ip hdr */
	m->m_data = m->m_ext.ext_buf + (m->m_ext.ext_size - (1536+ETHER_ALIGN));
	error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map, m,
			segs, &nseg, 0);
	if (error != 0) {
		m_freem(m);
		return error;
	}
	hw = npe->ix_hw;
	hw->ix_ne[0].data = htobe32(segs[0].ds_addr);
	/* NB: NPE requires length be a multiple of 64 */
	/* NB: buffer length is shifted in word */
	hw->ix_ne[0].len = htobe32(segs[0].ds_len << 16);
	hw->ix_ne[0].next = 0;
	npe->ix_m = m;
	/* Flush the memory in the mbuf */
	bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREREAD);
	return 0;
}

/*
 * RX q processing for a specific NPE.  Claim entries
 * from the hardware queue and pass the frames up the
 * stack. Pass the rx buffers to the free list.
 */
static void
npe_rxdone(int qid, void *arg)
{
#define	P2V(a, dma) \
	&(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
	struct npe_softc *sc = arg;
	struct npedma *dma = &sc->rxdma;
	uint32_t entry;

	while (ixpqmgr_qread(qid, &entry) == 0) {
		struct npebuf *npe = P2V(NPE_QM_Q_ADDR(entry), dma);
		struct mbuf *m;

		DPRINTF(sc, "%s: entry 0x%x neaddr 0x%x ne_len 0x%x\n",
		    __func__, entry, npe->ix_neaddr, npe->ix_hw->ix_ne[0].len);
		/*
		 * Allocate a new mbuf to replenish the rx buffer.
		 * If doing so fails we drop the rx'd frame so we
		 * can reuse the previous mbuf.  When we're able to
		 * allocate a new mbuf dispatch the mbuf w/ rx'd
		 * data up the stack and replace it with the newly
		 * allocated one.
		 */
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m != NULL) {
			struct mbuf *mrx = npe->ix_m;
			struct npehwbuf *hw = npe->ix_hw;
			struct ifnet *ifp = sc->sc_ifp;

			/* Flush mbuf memory for rx'd data */
			bus_dmamap_sync(dma->mtag, npe->ix_map,
			    BUS_DMASYNC_POSTREAD);

			/* XXX flush hw buffer; works now 'cuz coherent */
			/* set m_len etc. per rx frame size */
			mrx->m_len = be32toh(hw->ix_ne[0].len) & 0xffff;
			mrx->m_pkthdr.len = mrx->m_len;
			mrx->m_pkthdr.rcvif = ifp;

			ifp->if_ipackets++;
			ifp->if_input(ifp, mrx);
		} else {
			/* discard frame and re-use mbuf */
			m = npe->ix_m;
		}
		if (npe_rxbuf_init(sc, npe, m) == 0) {
			/* return npe buf to rx free list */
			ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
		} else {
			/* XXX should not happen */
		}
	}
#undef P2V
}

#ifdef DEVICE_POLLING
static void
npe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct npe_softc *sc = ifp->if_softc;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		npe_rxdone(sc->rx_qid, sc);
		npe_txdone(sc->tx_doneqid, sc);	/* XXX polls both NPE's */
	}
}
#endif /* DEVICE_POLLING */

static void
npe_startxmit(struct npe_softc *sc)
{
	struct npedma *dma = &sc->txdma;
	int i;

	NPE_ASSERT_LOCKED(sc);
	sc->tx_free = NULL;
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];
		if (npe->ix_m != NULL) {
			/* NB: should not happen */
			device_printf(sc->sc_dev,
			    "%s: free mbuf at entry %u\n", __func__, i);
			m_freem(npe->ix_m);
		}
		npe->ix_m = NULL;
		npe->ix_next = sc->tx_free;
		sc->tx_free = npe;
	}
}

static void
npe_startrecv(struct npe_softc *sc)
{
	struct npedma *dma = &sc->rxdma;
	struct npebuf *npe;
	int i;

	NPE_ASSERT_LOCKED(sc);
	for (i = 0; i < dma->nbuf; i++) {
		npe = &dma->buf[i];
		npe_rxbuf_init(sc, npe, npe->ix_m);
		/* set npe buf on rx free list */
		ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
	}
}

/*
 * Reset and initialize the chip
 */
static void
npeinit_locked(void *xsc)
{
	struct npe_softc *sc = xsc;
	struct ifnet *ifp = sc->sc_ifp;

	NPE_ASSERT_LOCKED(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)	/* XXX */
		return;

	/*
	 * Reset MAC core.
	 */
	npe_mac_reset(sc);

	/* disable transmitter and receiver in the MAC */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);

	/*
	 * Set the MAC core registers.
	 */
	WR4(sc, NPE_MAC_INT_CLK_THRESH, 0x1);	/* clock ratio: for ipx4xx */
	WR4(sc, NPE_MAC_TX_CNTRL2,	0xf);	/* max retries */
	WR4(sc, NPE_MAC_RANDOM_SEED,	0x8);	/* LFSR back-off seed */
	/* thresholds determined by NPE firmware FS */
	WR4(sc, NPE_MAC_THRESH_P_EMPTY,	0x12);
	WR4(sc, NPE_MAC_THRESH_P_FULL,	0x30);
	WR4(sc, NPE_MAC_BUF_SIZE_TX,	0x8);	/* tx fifo threshold (bytes) */
	WR4(sc, NPE_MAC_TX_DEFER,	0x15);	/* for single deferral */
	WR4(sc, NPE_MAC_RX_DEFER,	0x16);	/* deferral on inter-frame gap*/
	WR4(sc, NPE_MAC_TX_TWO_DEFER_1,	0x8);	/* for 2-part deferral */
	WR4(sc, NPE_MAC_TX_TWO_DEFER_2,	0x7);	/* for 2-part deferral */
	WR4(sc, NPE_MAC_SLOT_TIME,	0x80);	/* assumes MII mode */

	WR4(sc, NPE_MAC_TX_CNTRL1,
		  NPE_TX_CNTRL1_RETRY		/* retry failed xmits */
		| NPE_TX_CNTRL1_FCS_EN		/* append FCS */
		| NPE_TX_CNTRL1_2DEFER		/* 2-part deferral */
		| NPE_TX_CNTRL1_PAD_EN);	/* pad runt frames */
	/* XXX pad strip? */
	/* ena pause frame handling */
	WR4(sc, NPE_MAC_RX_CNTRL1, NPE_RX_CNTRL1_PAUSE_EN);
	WR4(sc, NPE_MAC_RX_CNTRL2, 0);

	npe_setmac(sc, IF_LLADDR(ifp));
	npe_setmcast(sc);

	npe_startxmit(sc);
	npe_startrecv(sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->npe_watchdog_timer = 0;		/* just in case */

	/* enable transmitter and receiver in the MAC */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) | NPE_RX_CNTRL1_RX_EN);
	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) | NPE_TX_CNTRL1_TX_EN);

	callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc);
}

static void
npeinit(void *xsc)
{
	struct npe_softc *sc = xsc;
	NPE_LOCK(sc);
	npeinit_locked(sc);
	NPE_UNLOCK(sc);
}

/*
 * Dequeue packets and place on the h/w transmit queue.
 */
static void
npestart_locked(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;
	struct npebuf *npe;
	struct npehwbuf *hw;
	struct mbuf *m, *n;
	struct npedma *dma = &sc->txdma;
	bus_dma_segment_t segs[NPE_MAXSEG];
	int nseg, len, error, i;
	uint32_t next;

	NPE_ASSERT_LOCKED(sc);
	/* XXX can this happen? */
	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	while (sc->tx_free != NULL) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			/* XXX? */
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
			return;
		}
		npe = sc->tx_free;
		error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map,
		    m, segs, &nseg, 0);
		if (error == EFBIG) {
			n = m_collapse(m, M_DONTWAIT, NPE_MAXSEG);
			if (n == NULL) {
				if_printf(ifp, "%s: too many fragments %u\n",
				    __func__, nseg);
				m_freem(m);
				return;	/* XXX? */
			}
			m = n;
			error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map,
			    m, segs, &nseg, 0);
		}
		if (error != 0 || nseg == 0) {
			if_printf(ifp, "%s: error %u nseg %u\n",
			    __func__, error, nseg);
			m_freem(m);
			return;	/* XXX? */
		}
		sc->tx_free = npe->ix_next;

		bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREWRITE);

		/*
		 * Tap off here if there is a bpf listener.
		 */
		BPF_MTAP(ifp, m);

		npe->ix_m = m;
		hw = npe->ix_hw;
		len = m->m_pkthdr.len;
		next = npe->ix_neaddr + sizeof(hw->ix_ne[0]);
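		/*
		 * NB: build the h/w scatter/gather chain: each entry
		 * carries its segment length in the high 16 bits of
		 * ix_ne[].len; the low 16 bits of the first entry
		 * carry the total frame length (zero thereafter).
		 */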
		for (i = 0; i < nseg; i++) {
			hw->ix_ne[i].data = htobe32(segs[i].ds_addr);
			hw->ix_ne[i].len = htobe32((segs[i].ds_len<<16) | len);
			hw->ix_ne[i].next = htobe32(next);

			len = 0;		/* zero for segments > 1 */
			next += sizeof(hw->ix_ne[0]);
		}
		hw->ix_ne[i-1].next = 0;	/* zero last in chain */
		/* XXX flush descriptor instead of using uncached memory */

		DPRINTF(sc, "%s: qwrite(%u, 0x%x) ne_data %x ne_len 0x%x\n",
		    __func__, sc->tx_qid, npe->ix_neaddr,
		    hw->ix_ne[0].data, hw->ix_ne[0].len);
		/* stick it on the tx q */
		/* XXX add vlan priority */
		ixpqmgr_qwrite(sc->tx_qid, npe->ix_neaddr);

		sc->npe_watchdog_timer = 5;
	}
	if (sc->tx_free == NULL)
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
}

static void
npestart(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;
	NPE_LOCK(sc);
	npestart_locked(ifp);
	NPE_UNLOCK(sc);
}

static void
npe_stopxmit(struct npe_softc *sc)
{
	struct npedma *dma = &sc->txdma;
	int i;

	NPE_ASSERT_LOCKED(sc);

	/* XXX qmgr */
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];

		if (npe->ix_m != NULL) {
			bus_dmamap_unload(dma->mtag, npe->ix_map);
			m_freem(npe->ix_m);
			npe->ix_m = NULL;
		}
	}
}

static void
npe_stoprecv(struct npe_softc *sc)
{
	struct npedma *dma = &sc->rxdma;
	int i;

	NPE_ASSERT_LOCKED(sc);

	/* XXX qmgr */
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];

		if (npe->ix_m != NULL) {
			bus_dmamap_unload(dma->mtag, npe->ix_map);
			m_freem(npe->ix_m);
			npe->ix_m = NULL;
		}
	}
}

/*
 * Turn off interrupts, and stop the nic.
 */
static void
npestop(struct npe_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* disable transmitter and receiver in the MAC */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);

	sc->npe_watchdog_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	callout_stop(&sc->tick_ch);

	npe_stopxmit(sc);
	npe_stoprecv(sc);
	/* XXX go into loopback & drain q's? */
	/* XXX but beware of disabling tx above */

	/*
	 * The MAC core rx/tx disable may leave the MAC hardware in an
	 * unpredictable state. A hw reset is executed before resetting
	 * all the MAC parameters to a known value.
	 */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
	DELAY(NPE_MAC_RESET_DELAY);
	WR4(sc, NPE_MAC_INT_CLK_THRESH, NPE_MAC_INT_CLK_THRESH_DEFAULT);
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);
}

static void
npewatchdog(struct npe_softc *sc)
{
	NPE_ASSERT_LOCKED(sc);

	if (sc->npe_watchdog_timer == 0 || --sc->npe_watchdog_timer != 0)
		return;

	device_printf(sc->sc_dev, "watchdog timeout\n");
	sc->sc_ifp->if_oerrors++;

	npeinit_locked(sc);
}

static int
npeioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct npe_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0;
#ifdef DEVICE_POLLING
	int mask;
#endif

	switch (cmd) {
	case SIOCSIFFLAGS:
		NPE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			npestop(sc);
		} else {
			/* reinitialize card on any parameter change */
			npeinit_locked(sc);
		}
		NPE_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* update multicast filter list. */
		NPE_LOCK(sc);
		npe_setmcast(sc);
		NPE_UNLOCK(sc);
		error = 0;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_mii);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

#ifdef DEVICE_POLLING
	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(npe_poll, ifp);
				if (error)
					return error;
				NPE_LOCK(sc);
				/* disable callbacks XXX txdone is shared */
				ixpqmgr_notify_disable(sc->rx_qid);
				ixpqmgr_notify_disable(sc->tx_doneqid);
				ifp->if_capenable |= IFCAP_POLLING;
				NPE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* NB: always enable qmgr callbacks */
				NPE_LOCK(sc);
				/* enable qmgr callbacks */
				ixpqmgr_notify_enable(sc->rx_qid,
				    IX_QMGR_Q_SOURCE_ID_NOT_E);
				ixpqmgr_notify_enable(sc->tx_doneqid,
				    IX_QMGR_Q_SOURCE_ID_NOT_E);
				ifp->if_capenable &= ~IFCAP_POLLING;
				NPE_UNLOCK(sc);
			}
		}
		break;
#endif
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}

/*
 * Setup a traffic class -> rx queue mapping.
 */
static int
npe_setrxqosentry(struct npe_softc *sc, int classix, int trafclass, int qid)
{
	uint32_t msg[2];

	msg[0] = (NPE_SETRXQOSENTRY << 24) | (sc->sc_npeid << 20) | classix;
	msg[1] = (trafclass << 24) | (1 << 23) | (qid << 16) | (qid << 4);
	return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
}

static int
npe_setfirewallmode(struct npe_softc *sc, int onoff)
{
	uint32_t msg[2];

	/* XXX honor onoff */
	msg[0] = (NPE_SETFIREWALLMODE << 24) | (sc->sc_npeid << 20);
	msg[1] = 0;
	return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
}

/*
 * Update and reset the statistics in the NPE.
 */
static int
npe_updatestats(struct npe_softc *sc)
{
	uint32_t msg[2];

	msg[0] = NPE_RESETSTATS << NPE_MAC_MSGID_SHL;
	msg[1] = sc->sc_stats_phys;	/* physical address of stat block */
	return ixpnpe_sendmsg_async(sc->sc_npe, msg);
}

#if 0
/*
 * Get the current statistics block.
 */
static int
npe_getstats(struct npe_softc *sc)
{
	uint32_t msg[2];

	msg[0] = NPE_GETSTATS << NPE_MAC_MSGID_SHL;
	msg[1] = sc->sc_stats_phys;	/* physical address of stat block */
	return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
}

/*
 * Query the image id of the loaded firmware.
 */
static uint32_t
npe_getimageid(struct npe_softc *sc)
{
	uint32_t msg[2];

	msg[0] = NPE_GETSTATUS << NPE_MAC_MSGID_SHL;
	msg[1] = 0;
	return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg) == 0 ? msg[1] : 0;
}

/*
 * Enable/disable loopback.
 */
static int
npe_setloopback(struct npe_softc *sc, int ena)
{
	uint32_t msg[2];

	msg[0] = (NPE_SETLOOPBACK << NPE_MAC_MSGID_SHL) | (ena != 0);
	msg[1] = 0;
	return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
}
#endif

static void
npe_child_detached(device_t dev, device_t child)
{
	struct npe_softc *sc;

	sc = device_get_softc(dev);
	if (child == sc->sc_mii)
		sc->sc_mii = NULL;
}

/*
 * MII bus support routines.
 */
#define	MII_RD4(sc, reg)	bus_space_read_4(sc->sc_iot, sc->sc_miih, reg)
#define	MII_WR4(sc, reg, v) \
	bus_space_write_4(sc->sc_iot, sc->sc_miih, reg, v)

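/*
 * NB: the MAC exposes each byte of the 32-bit MDIO command/status
 * words in its own 32-bit register, so the helpers below assemble
 * and split values with four byte-wide accesses at consecutive
 * register offsets.
 */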
static uint32_t
npe_mii_mdio_read(struct npe_softc *sc, int reg)
{
	uint32_t v;

	/* NB: registers are known to be sequential */
	v =  (MII_RD4(sc, reg+0) & 0xff) << 0;
	v |= (MII_RD4(sc, reg+4) & 0xff) << 8;
	v |= (MII_RD4(sc, reg+8) & 0xff) << 16;
	v |= (MII_RD4(sc, reg+12) & 0xff) << 24;
	return v;
}

static void
npe_mii_mdio_write(struct npe_softc *sc, int reg, uint32_t cmd)
{
	/* NB: registers are known to be sequential */
	MII_WR4(sc, reg+0, cmd & 0xff);
	MII_WR4(sc, reg+4, (cmd >> 8) & 0xff);
	MII_WR4(sc, reg+8, (cmd >> 16) & 0xff);
	MII_WR4(sc, reg+12, (cmd >> 24) & 0xff);
}

static int
npe_mii_mdio_wait(struct npe_softc *sc)
{
	uint32_t v;
	int i;

	/* NB: typically this takes 25-30 trips */
	for (i = 0; i < 1000; i++) {
		v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_CMD);
		if ((v & NPE_MII_GO) == 0)
			return 1;
		DELAY(1);
	}
	device_printf(sc->sc_dev, "%s: timeout after ~1ms, cmd 0x%x\n",
	    __func__, v);
	return 0;		/* NB: timeout */
}

static int
npe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct npe_softc *sc = device_get_softc(dev);
	uint32_t v;

	if (phy != sc->sc_phy)		/* XXX no auto-detect */
		return 0xffff;
	v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL) | NPE_MII_GO;
	npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
	if (npe_mii_mdio_wait(sc))
		v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_STS);
	else
		v = 0xffff | NPE_MII_READ_FAIL;
	return (v & NPE_MII_READ_FAIL) ? 0xffff : (v & 0xffff);
}

static void
npe_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct npe_softc *sc = device_get_softc(dev);
	uint32_t v;

	if (phy != sc->sc_phy)		/* XXX */
		return;
	v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL)
	  | data | NPE_MII_WRITE
	  | NPE_MII_GO;
	npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
	/* XXX complain about timeout */
	(void) npe_mii_mdio_wait(sc);
}

static void
npe_miibus_statchg(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->sc_mii);
	uint32_t tx1, rx1;

	/* sync MAC duplex state */
	tx1 = RD4(sc, NPE_MAC_TX_CNTRL1);
	rx1 = RD4(sc, NPE_MAC_RX_CNTRL1);
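	/*
	 * NB: the TX_CNTRL1 DUPLEX bit appears to select half-duplex
	 * operation when set, hence it is cleared (and pause frame
	 * handling enabled) for full-duplex links.
	 */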
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		tx1 &= ~NPE_TX_CNTRL1_DUPLEX;
		rx1 |= NPE_RX_CNTRL1_PAUSE_EN;
	} else {
		tx1 |= NPE_TX_CNTRL1_DUPLEX;
		rx1 &= ~NPE_RX_CNTRL1_PAUSE_EN;
	}
	WR4(sc, NPE_MAC_RX_CNTRL1, rx1);
	WR4(sc, NPE_MAC_TX_CNTRL1, tx1);
}

static device_method_t npe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		npe_probe),
	DEVMETHOD(device_attach,	npe_attach),
	DEVMETHOD(device_detach,	npe_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_detached,	npe_child_detached),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	npe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	npe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	npe_miibus_statchg),

	{ 0, 0 }
};

static driver_t npe_driver = {
	"npe",
	npe_methods,
	sizeof(struct npe_softc),
};

DRIVER_MODULE(npe, ixp, npe_driver, npe_devclass, 0, 0);
DRIVER_MODULE(miibus, npe, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(npe, ixpqmgr, 1, 1, 1);
MODULE_DEPEND(npe, miibus, 1, 1, 1);
MODULE_DEPEND(npe, ether, 1, 1, 1);