if_npe.c revision 166064
1/*-
2 * Copyright (c) 2006 Sam Leffler.  All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 *    notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 *    notice, this list of conditions and the following disclaimer in the
11 *    documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
14 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
15 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
16 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
17 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
18 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
19 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
20 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
22 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23 */
24
25#include <sys/cdefs.h>
26__FBSDID("$FreeBSD: head/sys/arm/xscale/ixp425/if_npe.c 166064 2007-01-17 00:58:25Z cognet $");
27
28/*
29 * Intel XScale NPE Ethernet driver.
30 *
31 * This driver handles the two ports present on the IXP425.
32 * Packet processing is done by the Network Processing Engines
33 * (NPE's) that work together with a MAC and PHY. The MAC
34 * is also mapped to the XScale cpu; the PHY is accessed via
35 * the MAC. NPE-XScale communication happens through h/w
36 * queues managed by the Q Manager block.
37 *
38 * The code here replaces the ethAcc, ethMii, and ethDB classes
39 * in the Intel Access Library (IAL) and the OS-specific driver.
40 *
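 * A rough sketch of the data path (as implemented below):
 *
 *   tx: npestart_locked dma-maps each mbuf chain into a chain of
 *       npehwbuf entries and writes the physical address of the first
 *       entry to the tx q; when the NPE has sent the frame the entry
 *       shows up on the shared tx done q and npe_txdone reclaims it.
 *   rx: the driver posts empty buffers on the rx free q; the NPE
 *       fills them and pushes them onto the rx q, where npe_rxdone
 *       hands the frames to the stack and replenishes the free q.
 *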
41 * XXX add vlan support
42 * XXX NPE-C port doesn't work yet
43 */
44#ifdef HAVE_KERNEL_OPTION_HEADERS
45#include "opt_device_polling.h"
46#endif
47
48#include <sys/param.h>
49#include <sys/systm.h>
50#include <sys/bus.h>
51#include <sys/kernel.h>
52#include <sys/mbuf.h>
53#include <sys/malloc.h>
54#include <sys/module.h>
55#include <sys/rman.h>
56#include <sys/socket.h>
57#include <sys/sockio.h>
58#include <sys/sysctl.h>
59#include <sys/endian.h>
60#include <machine/bus.h>
61
62#include <net/ethernet.h>
63#include <net/if.h>
64#include <net/if_arp.h>
65#include <net/if_dl.h>
66#include <net/if_media.h>
67#include <net/if_mib.h>
68#include <net/if_types.h>
69
70#ifdef INET
71#include <netinet/in.h>
72#include <netinet/in_systm.h>
73#include <netinet/in_var.h>
74#include <netinet/ip.h>
75#endif
76
77#include <net/bpf.h>
78#include <net/bpfdesc.h>
79
80#include <arm/xscale/ixp425/ixp425reg.h>
81#include <arm/xscale/ixp425/ixp425var.h>
82#include <arm/xscale/ixp425/ixp425_qmgr.h>
83#include <arm/xscale/ixp425/ixp425_npevar.h>
84
85#include <dev/mii/mii.h>
86#include <dev/mii/miivar.h>
87#include <arm/xscale/ixp425/if_npereg.h>
88
89#include "miibus_if.h"
90
91/*
92 * XXX: For the main bus dma tag. Can go away if the new method to get the
93 * dma tag from the parent is MFC'd into RELENG_6.
94 */
95extern struct ixp425_softc *ixp425_softc;
96
97struct npebuf {
98	struct npebuf	*ix_next;	/* chain to next buffer */
99	void		*ix_m;		/* backpointer to mbuf */
100	bus_dmamap_t	ix_map;		/* bus dma map for associated data */
101	struct npehwbuf	*ix_hw;		/* associated h/w block */
102	uint32_t	ix_neaddr;	/* phys address of ix_hw */
103};
104
105struct npedma {
106	const char*	name;
107	int		nbuf;		/* # npebuf's allocated */
108	bus_dma_tag_t	mtag;		/* bus dma tag for mbuf data */
109	struct npehwbuf	*hwbuf;		/* NPE h/w buffers */
110	bus_dma_tag_t	buf_tag;	/* tag+map for NPE buffers */
111	bus_dmamap_t	buf_map;
112	bus_addr_t	buf_phys;	/* phys addr of buffers */
113	struct npebuf	*buf;		/* s/w buffers (1-1 w/ h/w) */
114};
115
116struct npe_softc {
117	/* XXX mii requires this be first; do not move! */
118	struct ifnet	*sc_ifp;	/* ifnet pointer */
119	struct mtx	sc_mtx;		/* basically a perimeter lock */
120	device_t	sc_dev;
121	bus_space_tag_t	sc_iot;
122	bus_space_handle_t sc_ioh;	/* MAC register window */
123	device_t	sc_mii;		/* child miibus */
124	bus_space_handle_t sc_miih;	/* MII register window */
125	struct ixpnpe_softc *sc_npe;	/* NPE support */
126	int		sc_debug;	/* DPRINTF* control */
127	int		sc_tickinterval;
128	struct callout	tick_ch;	/* Tick callout */
129	struct npedma	txdma;
130	struct npebuf	*tx_free;	/* list of free tx buffers */
131	struct npedma	rxdma;
132	bus_addr_t	buf_phys;	/* XXX for returning a value */
133	int		rx_qid;		/* rx qid */
134	int		rx_freeqid;	/* rx free buffers qid */
135	int		tx_qid;		/* tx qid */
136	int		tx_doneqid;	/* tx completed qid */
137	struct ifmib_iso_8802_3 mibdata;
138	bus_dma_tag_t	sc_stats_tag;	/* bus dma tag for stats block */
139	struct npestats	*sc_stats;
140	bus_dmamap_t	sc_stats_map;
141	bus_addr_t	sc_stats_phys;	/* phys addr of sc_stats */
142};
143
144/*
145 * Per-unit static configuration for IXP425.  The tx and
146 * rx free Q id's are fixed by the NPE microcode.  The
147 * rx Q id's are programmed to be separate to simplify
148 * multi-port processing.  It may be better to handle
149 * all traffic through one Q (as done by the Intel drivers).
150 *
151 * Note that the PHYs are accessible only from MAC A
152 * on the IXP425.  This and other platform-specific
153 * assumptions probably need to be handled through hints.
154 */
155static const struct {
156	const char	*desc;		/* device description */
157	int		npeid;		/* NPE assignment */
158	uint32_t	imageid;	/* NPE firmware image id */
159	uint32_t	regbase;
160	int		regsize;
161	uint32_t	miibase;
162	int		miisize;
163	uint8_t		rx_qid;
164	uint8_t		rx_freeqid;
165	uint8_t		tx_qid;
166	uint8_t		tx_doneqid;
167} npeconfig[NPE_PORTS_MAX] = {
168	{ .desc		= "IXP NPE-B",
169	  .npeid	= NPE_B,
170	  .imageid	= IXP425_NPE_B_IMAGEID,
171	  .regbase	= IXP425_MAC_A_HWBASE,
172	  .regsize	= IXP425_MAC_A_SIZE,
173	  .miibase	= IXP425_MAC_A_HWBASE,
174	  .miisize	= IXP425_MAC_A_SIZE,
175	  .rx_qid	= 4,
176	  .rx_freeqid	= 27,
177	  .tx_qid	= 24,
178	  .tx_doneqid	= 31
179	},
180	{ .desc		= "IXP NPE-C",
181	  .npeid	= NPE_C,
182	  .imageid	= IXP425_NPE_C_IMAGEID,
183	  .regbase	= IXP425_MAC_B_HWBASE,
184	  .regsize	= IXP425_MAC_B_SIZE,
185	  .miibase	= IXP425_MAC_A_HWBASE,
186	  .miisize	= IXP425_MAC_A_SIZE,
187	  .rx_qid	= 12,
188	  .rx_freeqid	= 28,
189	  .tx_qid	= 25,
190	  .tx_doneqid	= 31
191	},
192};
193static struct npe_softc *npes[NPE_MAX];	/* NB: indexed by npeid */
194
195static __inline uint32_t
196RD4(struct npe_softc *sc, bus_size_t off)
197{
198	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
199}
200
201static __inline void
202WR4(struct npe_softc *sc, bus_size_t off, uint32_t val)
203{
204	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
205}
206
207#define NPE_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
208#define	NPE_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
209#define NPE_LOCK_INIT(_sc) \
210	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->sc_dev), \
211	    MTX_NETWORK_LOCK, MTX_DEF)
212#define NPE_LOCK_DESTROY(_sc)	mtx_destroy(&_sc->sc_mtx);
213#define NPE_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
214#define NPE_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
215
216static devclass_t npe_devclass;
217
218static int	npe_activate(device_t dev);
219static void	npe_deactivate(device_t dev);
220static int	npe_ifmedia_update(struct ifnet *ifp);
221static void	npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr);
222static void	npe_setmac(struct npe_softc *sc, u_char *eaddr);
223static void	npe_getmac(struct npe_softc *sc, u_char *eaddr);
224static void	npe_txdone(int qid, void *arg);
225static int	npe_rxbuf_init(struct npe_softc *, struct npebuf *,
226			struct mbuf *);
227static void	npe_rxdone(int qid, void *arg);
228static void	npeinit(void *);
229static void	npestart_locked(struct ifnet *);
230static void	npestart(struct ifnet *);
231static void	npestop(struct npe_softc *);
232static void	npewatchdog(struct ifnet *);
233static int	npeioctl(struct ifnet * ifp, u_long, caddr_t);
234
235static int	npe_setrxqosentry(struct npe_softc *, int classix,
236			int trafclass, int qid);
237static int	npe_updatestats(struct npe_softc *);
238#if 0
239static int	npe_getstats(struct npe_softc *);
240static uint32_t	npe_getimageid(struct npe_softc *);
241static int	npe_setloopback(struct npe_softc *, int ena);
242#endif
243
244/* NB: all tx done processing goes through one queue */
245static int tx_doneqid = -1;
246
247SYSCTL_NODE(_hw, OID_AUTO, npe, CTLFLAG_RD, 0, "IXP425 NPE driver parameters");
248
249static int npe_debug = 0;
250SYSCTL_INT(_hw_npe, OID_AUTO, debug, CTLFLAG_RW, &npe_debug,
251	   0, "IXP425 NPE network interface debug msgs");
252TUNABLE_INT("hw.npe.npe", &npe_debug);
253#define	DPRINTF(sc, fmt, ...) do {					\
254	if (sc->sc_debug) device_printf(sc->sc_dev, fmt, __VA_ARGS__);	\
255} while (0)
256#define	DPRINTFn(n, sc, fmt, ...) do {					\
257	if (sc->sc_debug >= n) device_printf(sc->sc_dev, fmt, __VA_ARGS__);\
258} while (0)
259static int npe_tickinterval = 3;		/* npe_tick frequency (secs) */
260SYSCTL_INT(_hw_npe, OID_AUTO, tickinterval, CTLFLAG_RD, &npe_tickinterval,
261	    0, "periodic work interval (secs)");
262TUNABLE_INT("hw.npe.tickinterval", &npe_tickinterval);
263
264static	int npe_rxbuf = 64;		/* # rx buffers to allocate */
265SYSCTL_INT(_hw_npe, OID_AUTO, rxbuf, CTLFLAG_RD, &npe_rxbuf,
266	    0, "rx buffers allocated");
267TUNABLE_INT("hw.npe.rxbuf", &npe_rxbuf);
268static	int npe_txbuf = 128;		/* # tx buffers to allocate */
269SYSCTL_INT(_hw_npe, OID_AUTO, txbuf, CTLFLAG_RD, &npe_txbuf,
270	    0, "tx buffers allocated");
271TUNABLE_INT("hw.npe.txbuf", &npe_txbuf);
272
273static int
274npe_probe(device_t dev)
275{
276	int unit = device_get_unit(dev);
277
278	if (unit >= NPE_PORTS_MAX) {
279		device_printf(dev, "unit %d not supported\n", unit);
280		return EINVAL;
281	}
282	/* XXX check feature register to see if enabled */
283	device_set_desc(dev, npeconfig[unit].desc);
284	return 0;
285}
286
287static int
288npe_attach(device_t dev)
289{
290	struct npe_softc *sc = device_get_softc(dev);
291	struct ixp425_softc *sa = device_get_softc(device_get_parent(dev));
292	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
293	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
294	struct ifnet *ifp = NULL;
295	int error;
296	u_char eaddr[6];
297
298	sc->sc_dev = dev;
299	sc->sc_iot = sa->sc_iot;
300	NPE_LOCK_INIT(sc);
301	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
302	sc->sc_debug = npe_debug;
303	sc->sc_tickinterval = npe_tickinterval;
304
305	sc->sc_npe = ixpnpe_attach(dev);
306	if (sc->sc_npe == NULL) {
307		error = EIO;		/* XXX */
308		goto out;
309	}
310
311	error = npe_activate(dev);
312	if (error)
313		goto out;
314
315	npe_getmac(sc, eaddr);
316
317	/* NB: must be setup prior to invoking mii code */
318	sc->sc_ifp = ifp = if_alloc(IFT_ETHER);
319	if (mii_phy_probe(dev, &sc->sc_mii, npe_ifmedia_update, npe_ifmedia_status)) {
320		device_printf(dev, "Cannot find my PHY.\n");
321		error = ENXIO;
322		goto out;
323	}
324
325	ifp->if_softc = sc;
326	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
327	ifp->if_mtu = ETHERMTU;
328	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
329	ifp->if_start = npestart;
330	ifp->if_ioctl = npeioctl;
331	ifp->if_watchdog = npewatchdog;
332	ifp->if_init = npeinit;
333	IFQ_SET_MAXLEN(&ifp->if_snd, sc->txdma.nbuf - 1);
334	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
335	IFQ_SET_READY(&ifp->if_snd);
336	ifp->if_timer = 0;
337	ifp->if_linkmib = &sc->mibdata;
338	ifp->if_linkmiblen = sizeof(sc->mibdata);
339	sc->mibdata.dot3Compliance = DOT3COMPLIANCE_STATS;
340#ifdef DEVICE_POLLING
341	ifp->if_capabilities |= IFCAP_POLLING;
342#endif
343
344	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug",
345	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs");
346	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tickinterval",
347	    CTLFLAG_RW, &sc->sc_tickinterval, 0, "periodic work frequency");
348
349	ether_ifattach(ifp, eaddr);
350	return 0;
351out:
352	npe_deactivate(dev);
353	if (ifp != NULL)
354		if_free(ifp);
355	return error;
356}
357
358static int
359npe_detach(device_t dev)
360{
361	struct npe_softc *sc = device_get_softc(dev);
362	struct ifnet *ifp = sc->sc_ifp;
363
364#ifdef DEVICE_POLLING
365	if (ifp->if_capenable & IFCAP_POLLING)
366		ether_poll_deregister(ifp);
367#endif
368	npestop(sc);
369	if (ifp != NULL) {
370		ether_ifdetach(ifp);
371		if_free(ifp);
372	}
373	NPE_LOCK_DESTROY(sc);
374	npe_deactivate(dev);
375	if (sc->sc_npe != NULL)
376		ixpnpe_detach(sc->sc_npe);
377	return 0;
378}
379
380/*
381 * Compute and install the multicast filter.
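 *
 * The filter is an (address, mask) pair: set accumulates the AND of
 * all enabled multicast addresses and clr the OR, so mask ends up
 * with a 1 only in bit positions that have the same value in every
 * address, and addr carries that common value.  Presumably the MAC
 * then accepts a frame whose destination matches addr in all mask
 * bits.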
382 */
383static void
384npe_setmcast(struct npe_softc *sc)
385{
386	struct ifnet *ifp = sc->sc_ifp;
387	uint8_t mask[ETHER_ADDR_LEN], addr[ETHER_ADDR_LEN];
388	int i;
389
390	if (ifp->if_flags & IFF_PROMISC) {
391		memset(mask, 0, ETHER_ADDR_LEN);
392		memset(addr, 0, ETHER_ADDR_LEN);
393	} else if (ifp->if_flags & IFF_ALLMULTI) {
394		static const uint8_t allmulti[ETHER_ADDR_LEN] =
395		    { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
396		memcpy(mask, allmulti, ETHER_ADDR_LEN);
397		memcpy(addr, allmulti, ETHER_ADDR_LEN);
398	} else {
399		uint8_t clr[ETHER_ADDR_LEN], set[ETHER_ADDR_LEN];
400		struct ifmultiaddr *ifma;
401		const uint8_t *mac;
402
403		memset(clr, 0, ETHER_ADDR_LEN);
404		memset(set, 0xff, ETHER_ADDR_LEN);
405
406		IF_ADDR_LOCK(ifp);
407		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
408			if (ifma->ifma_addr->sa_family != AF_LINK)
409				continue;
410			mac = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
411			for (i = 0; i < ETHER_ADDR_LEN; i++) {
412				clr[i] |= mac[i];
413				set[i] &= mac[i];
414			}
415		}
416		IF_ADDR_UNLOCK(ifp);
417
418		for (i = 0; i < ETHER_ADDR_LEN; i++) {
419			mask[i] = set[i] | ~clr[i];
420			addr[i] = set[i];
421		}
422	}
423
424	/*
425	 * Write the mask and address registers.
426	 */
427	for (i = 0; i < ETHER_ADDR_LEN; i++) {
428		WR4(sc, NPE_MAC_ADDR_MASK(i), mask[i]);
429		WR4(sc, NPE_MAC_ADDR(i), addr[i]);
430	}
431}
432
433static void
434npe_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
435{
436	struct npe_softc *sc;
437
438	if (error != 0)
439		return;
440	sc = (struct npe_softc *)arg;
441	sc->buf_phys = segs[0].ds_addr;
442}
443
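/*
 * Allocate the dma resources for a tx or rx ring: one physically
 * contiguous array of npehwbuf descriptors shared with the NPE
 * (buf_tag/buf_map, mapped BUS_DMA_COHERENT for now), a parallel
 * array of npebuf software state, and a per-mbuf dma map from mtag.
 * Each descriptor must land on a 32-byte boundary; see the KASSERT
 * below.
 */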
444static int
445npe_dma_setup(struct npe_softc *sc, struct npedma *dma,
446	const char *name, int nbuf, int maxseg)
447{
448	int error, i;
449
450	memset(dma, 0, sizeof(*dma));
451
452	dma->name = name;
453	dma->nbuf = nbuf;
454
455	/* DMA tag for mapped mbufs  */
456	error = bus_dma_tag_create(ixp425_softc->sc_dmat, 1, 0,
457	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
458	    MCLBYTES, maxseg, MCLBYTES, 0,
459	    busdma_lock_mutex, &sc->sc_mtx, &dma->mtag);
460	if (error != 0) {
461		device_printf(sc->sc_dev, "unable to create %s mbuf dma tag, "
462		     "error %u\n", dma->name, error);
463		return error;
464	}
465
466	/* DMA tag and map for the NPE buffers */
467	error = bus_dma_tag_create(ixp425_softc->sc_dmat, sizeof(uint32_t), 0,
468	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
469	    nbuf * sizeof(struct npehwbuf), 1,
470	    nbuf * sizeof(struct npehwbuf), 0,
471	    busdma_lock_mutex, &sc->sc_mtx, &dma->buf_tag);
472	if (error != 0) {
473		device_printf(sc->sc_dev,
474		    "unable to create %s npebuf dma tag, error %u\n",
475		    dma->name, error);
476		return error;
477	}
478	/* XXX COHERENT for now */
479	if ((error = bus_dmamem_alloc(dma->buf_tag, (void **)&dma->hwbuf,
480	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
481	    &dma->buf_map)) != 0) {
482		device_printf(sc->sc_dev,
483		     "unable to allocate memory for %s h/w buffers, error %u\n",
484		     dma->name, error);
485		return error;
486	}
487	/* XXX M_TEMP */
488	dma->buf = malloc(nbuf * sizeof(struct npebuf), M_TEMP, M_NOWAIT | M_ZERO);
489	if (dma->buf == NULL) {
490		device_printf(sc->sc_dev,
491		     "unable to allocate memory for %s s/w buffers\n",
492		     dma->name);
493		return ENOMEM;
494	}
495	if ((error = bus_dmamap_load(dma->buf_tag, dma->buf_map,
496	    dma->hwbuf, nbuf*sizeof(struct npehwbuf), npe_getaddr, sc, 0)) != 0) {
497		device_printf(sc->sc_dev,
498		     "unable to map memory for %s h/w buffers, error %u\n",
499		     dma->name, error);
500		return error;
501	}
502	dma->buf_phys = sc->buf_phys;
503	for (i = 0; i < dma->nbuf; i++) {
504		struct npebuf *npe = &dma->buf[i];
505		struct npehwbuf *hw = &dma->hwbuf[i];
506
507		/* calculate offset to shared area */
508		npe->ix_neaddr = dma->buf_phys +
509			((uintptr_t)hw - (uintptr_t)dma->hwbuf);
510		KASSERT((npe->ix_neaddr & 0x1f) == 0,
511		    ("ixpbuf misaligned, PA 0x%x", npe->ix_neaddr));
512		error = bus_dmamap_create(dma->mtag, BUS_DMA_NOWAIT,
513				&npe->ix_map);
514		if (error != 0) {
515			device_printf(sc->sc_dev,
516			     "unable to create dmamap for %s buffer %u, "
517			     "error %u\n", dma->name, i, error);
518			return error;
519		}
520		npe->ix_hw = hw;
521	}
522	bus_dmamap_sync(dma->buf_tag, dma->buf_map, BUS_DMASYNC_PREWRITE);
523	return 0;
524}
525
526static void
527npe_dma_destroy(struct npe_softc *sc, struct npedma *dma)
528{
529	int i;
530
531	if (dma->hwbuf != NULL) {
532		for (i = 0; i < dma->nbuf; i++) {
533			struct npebuf *npe = &dma->buf[i];
534			bus_dmamap_destroy(dma->mtag, npe->ix_map);
535		}
536		bus_dmamap_unload(dma->buf_tag, dma->buf_map);
537		bus_dmamem_free(dma->buf_tag, dma->hwbuf, dma->buf_map);
538		bus_dmamap_destroy(dma->buf_tag, dma->buf_map);
539	}
540	if (dma->buf != NULL)
541		free(dma->buf, M_TEMP);
542	if (dma->buf_tag)
543		bus_dma_tag_destroy(dma->buf_tag);
544	if (dma->mtag)
545		bus_dma_tag_destroy(dma->mtag);
546	memset(dma, 0, sizeof(*dma));
547}
548
549static int
550npe_activate(device_t dev)
551{
552	struct npe_softc * sc = device_get_softc(dev);
553	int unit = device_get_unit(dev);
554	int error, i;
555
556	/* load NPE firmware and start it running */
557	error = ixpnpe_init(sc->sc_npe, "npe_fw", npeconfig[unit].imageid);
558	if (error != 0)
559		return error;
560
561	if (bus_space_map(sc->sc_iot, npeconfig[unit].regbase,
562	    npeconfig[unit].regsize, 0, &sc->sc_ioh)) {
563		device_printf(dev, "Cannot map registers 0x%x:0x%x\n",
564		    npeconfig[unit].regbase, npeconfig[unit].regsize);
565		return ENOMEM;
566	}
567
568	if (npeconfig[unit].miibase != npeconfig[unit].regbase) {
569		/*
570		 * The PHYs are only accessible from one MAC (it appears),
571		 * so for the other MACs set up an additional mapping for
572		 * frobbing the PHY registers.
573		 */
574		if (bus_space_map(sc->sc_iot, npeconfig[unit].miibase,
575		    npeconfig[unit].miisize, 0, &sc->sc_miih)) {
576			device_printf(dev,
577			    "Cannot map MII registers 0x%x:0x%x\n",
578			    npeconfig[unit].miibase, npeconfig[unit].miisize);
579			return ENOMEM;
580		}
581	} else
582		sc->sc_miih = sc->sc_ioh;
583	error = npe_dma_setup(sc, &sc->txdma, "tx", npe_txbuf, NPE_MAXSEG);
584	if (error != 0)
585		return error;
586	error = npe_dma_setup(sc, &sc->rxdma, "rx", npe_rxbuf, 1);
587	if (error != 0)
588		return error;
589
590	/* setup statistics block */
591	error = bus_dma_tag_create(ixp425_softc->sc_dmat, sizeof(uint32_t), 0,
592	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
593	    sizeof(struct npestats), 1, sizeof(struct npestats), 0,
594	    busdma_lock_mutex, &sc->sc_mtx, &sc->sc_stats_tag);
595	if (error != 0) {
596		device_printf(sc->sc_dev, "unable to create stats tag, "
597		     "error %u\n", error);
598		return error;
599	}
600	if ((error = bus_dmamem_alloc(sc->sc_stats_tag, (void **)&sc->sc_stats,
601	    BUS_DMA_NOWAIT, &sc->sc_stats_map)) != 0) {
602		device_printf(sc->sc_dev,
603		     "unable to allocate memory for stats block, error %u\n",
604		     error);
605		return error;
606	}
607	if ((error = bus_dmamap_load(sc->sc_stats_tag, sc->sc_stats_map,
608	    sc->sc_stats, sizeof(struct npestats), npe_getaddr, sc, 0)) != 0) {
609		device_printf(sc->sc_dev,
610		     "unable to load memory for stats block, error %u\n",
611		     error);
612		return error;
613	}
614	sc->sc_stats_phys = sc->buf_phys;
615
616	/* XXX disable half-bridge LEARNING+FILTERING feature */
617
618	/*
619	 * Setup h/w rx/tx queues.  There are four q's:
620	 *   rx		inbound q of rx'd frames
621	 *   rx_free	pool of ixpbuf's for receiving frames
622	 *   tx		outbound q of frames to send
623	 *   tx_done	q of tx frames that have been processed
624	 *
625	 * The NPE handles the actual tx/rx process and the q manager
626	 * handles the queues.  The driver just writes entries to the
627	 * q manager mailboxes and gets callbacks when there are rx'd
628	 * frames to process or tx'd frames to reap.  These callbacks
629	 * are controlled by the q configurations; e.g. we get a
630	 * callback when tx_done has 2 or more frames to process and
631	 * when the rx q has at least one frame.  These settings can
632	 * be changed at the time the q is configured.
633	 */
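	/*
	 * NB: of the qconfig calls below only the rx q and the shared
	 * tx done q get a callback (using the IX_QMGR_Q_SOURCE_ID_NOT_E
	 * source); the rx free and tx q's have no callback since the
	 * driver only ever writes entries to them.
	 */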
634	sc->rx_qid = npeconfig[unit].rx_qid;
635	ixpqmgr_qconfig(sc->rx_qid, npe_rxbuf, 0,  1,
636		IX_QMGR_Q_SOURCE_ID_NOT_E, npe_rxdone, sc);
637	sc->rx_freeqid = npeconfig[unit].rx_freeqid;
638	ixpqmgr_qconfig(sc->rx_freeqid,	npe_rxbuf, 0, npe_rxbuf/2, 0, NULL, sc);
639	/* tell the NPE to direct all traffic to rx_qid */
640#if 0
641	for (i = 0; i < 8; i++)
642#else
643device_printf(sc->sc_dev, "remember to fix rx q setup\n");
644	for (i = 0; i < 4; i++)
645#endif
646		npe_setrxqosentry(sc, i, 0, sc->rx_qid);
647
648	sc->tx_qid = npeconfig[unit].tx_qid;
649	sc->tx_doneqid = npeconfig[unit].tx_doneqid;
650	ixpqmgr_qconfig(sc->tx_qid, npe_txbuf, 0, npe_txbuf, 0, NULL, sc);
651	if (tx_doneqid == -1) {
652		ixpqmgr_qconfig(sc->tx_doneqid,	npe_txbuf, 0,  2,
653			IX_QMGR_Q_SOURCE_ID_NOT_E, npe_txdone, sc);
654		tx_doneqid = sc->tx_doneqid;
655	}
656
657	KASSERT(npes[npeconfig[unit].npeid] == NULL,
658	    ("npe %u already setup", npeconfig[unit].npeid));
659	npes[npeconfig[unit].npeid] = sc;
660
661	return 0;
662}
663
664static void
665npe_deactivate(device_t dev)
666{
667	struct npe_softc *sc = device_get_softc(dev);
668	int unit = device_get_unit(dev);
669
670	npes[npeconfig[unit].npeid] = NULL;
671
672	/* XXX disable q's */
673	if (sc->sc_npe != NULL)
674		ixpnpe_stop(sc->sc_npe);
675	if (sc->sc_stats != NULL) {
676		bus_dmamap_unload(sc->sc_stats_tag, sc->sc_stats_map);
677		bus_dmamem_free(sc->sc_stats_tag, sc->sc_stats,
678			sc->sc_stats_map);
679		bus_dmamap_destroy(sc->sc_stats_tag, sc->sc_stats_map);
680	}
681	if (sc->sc_stats_tag != NULL)
682		bus_dma_tag_destroy(sc->sc_stats_tag);
683	npe_dma_destroy(sc, &sc->txdma);
684	npe_dma_destroy(sc, &sc->rxdma);
685	bus_generic_detach(sc->sc_dev);
686	if (sc->sc_mii)
687		device_delete_child(sc->sc_dev, sc->sc_mii);
688#if 0
689	/* XXX sc_ioh and sc_miih */
690	if (sc->mem_res)
691		bus_release_resource(dev, SYS_RES_IOPORT,
692		    rman_get_rid(sc->mem_res), sc->mem_res);
693	sc->mem_res = 0;
694#endif
695}
696
697/*
698 * Change media according to request.
699 */
700static int
701npe_ifmedia_update(struct ifnet *ifp)
702{
703	struct npe_softc *sc = ifp->if_softc;
704	struct mii_data *mii;
705
706	mii = device_get_softc(sc->sc_mii);
707	NPE_LOCK(sc);
708	mii_mediachg(mii);
709	/* XXX push state ourself? */
710	NPE_UNLOCK(sc);
711	return (0);
712}
713
714/*
715 * Notify the world which media we're using.
716 */
717static void
718npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
719{
720	struct npe_softc *sc = ifp->if_softc;
721	struct mii_data *mii;
722
723	mii = device_get_softc(sc->sc_mii);
724	NPE_LOCK(sc);
725	mii_pollstat(mii);
726	ifmr->ifm_active = mii->mii_media_active;
727	ifmr->ifm_status = mii->mii_media_status;
728	NPE_UNLOCK(sc);
729}
730
731static void
732npe_addstats(struct npe_softc *sc)
733{
734#define	MIBADD(x)	sc->mibdata.x += be32toh(ns->x)
735	struct ifnet *ifp = sc->sc_ifp;
736	struct npestats *ns = sc->sc_stats;
737
738	MIBADD(dot3StatsAlignmentErrors);
739	MIBADD(dot3StatsFCSErrors);
740	MIBADD(dot3StatsSingleCollisionFrames);
741	MIBADD(dot3StatsMultipleCollisionFrames);
742	MIBADD(dot3StatsDeferredTransmissions);
743	MIBADD(dot3StatsLateCollisions);
744	MIBADD(dot3StatsExcessiveCollisions);
745	MIBADD(dot3StatsInternalMacTransmitErrors);
746	MIBADD(dot3StatsCarrierSenseErrors);
747	sc->mibdata.dot3StatsFrameTooLongs +=
748	      be32toh(ns->RxLargeFramesDiscards)
749	    + be32toh(ns->TxLargeFrameDiscards);
750	MIBADD(dot3StatsInternalMacReceiveErrors);
751	sc->mibdata.dot3StatsMissedFrames +=
752	      be32toh(ns->RxOverrunDiscards)
753	    + be32toh(ns->RxUnderflowEntryDiscards);
754
755	ifp->if_oerrors +=
756		  be32toh(ns->dot3StatsInternalMacTransmitErrors)
757		+ be32toh(ns->dot3StatsCarrierSenseErrors)
758		+ be32toh(ns->TxVLANIdFilterDiscards)
759		;
760	ifp->if_ierrors += be32toh(ns->dot3StatsFCSErrors)
761		+ be32toh(ns->dot3StatsInternalMacReceiveErrors)
762		+ be32toh(ns->RxOverrunDiscards)
763		+ be32toh(ns->RxUnderflowEntryDiscards)
764		;
765	ifp->if_collisions +=
766		  be32toh(ns->dot3StatsSingleCollisionFrames)
767		+ be32toh(ns->dot3StatsMultipleCollisionFrames)
768		;
769#undef MIBADD
770}
771
772static void
773npe_tick(void *xsc)
774{
775#define	ACK	(NPE_RESETSTATS << NPE_MAC_MSGID_SHL)
776	struct npe_softc *sc = xsc;
777	struct mii_data *mii = device_get_softc(sc->sc_mii);
778	uint32_t msg[2];
779
780	NPE_ASSERT_LOCKED(sc);
781
782	/*
783	 * NB: to avoid sleeping with the softc lock held we
784	 * split the NPE msg processing into two parts.  The
785	 * request for statistics is sent w/o waiting for a
786	 * reply and then on the next tick we retrieve the
787	 * results.  This works because npe_tick is the only
788	 * code that talks via the mailboxes (except at setup).
789	 * This likely can be handled better.
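	 *
	 * The reply's first word echoes the message id of the
	 * NPE_RESETSTATS request (the ACK value above), so a matching
	 * id is taken to mean the stats block requested on a previous
	 * tick has been written and can be folded into the mib counters.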
790	 */
791	if (ixpnpe_recvmsg(sc->sc_npe, msg) == 0 && msg[0] == ACK) {
792		bus_dmamap_sync(sc->sc_stats_tag, sc->sc_stats_map,
793		    BUS_DMASYNC_POSTREAD);
794		npe_addstats(sc);
795	}
796	npe_updatestats(sc);
797	mii_tick(mii);
798
799	/* schedule next poll */
800	callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc);
801#undef ACK
802}
803
804static void
805npe_setmac(struct npe_softc *sc, u_char *eaddr)
806{
807	WR4(sc, NPE_MAC_UNI_ADDR_1, eaddr[0]);
808	WR4(sc, NPE_MAC_UNI_ADDR_2, eaddr[1]);
809	WR4(sc, NPE_MAC_UNI_ADDR_3, eaddr[2]);
810	WR4(sc, NPE_MAC_UNI_ADDR_4, eaddr[3]);
811	WR4(sc, NPE_MAC_UNI_ADDR_5, eaddr[4]);
812	WR4(sc, NPE_MAC_UNI_ADDR_6, eaddr[5]);
813
814}
815
816static void
817npe_getmac(struct npe_softc *sc, u_char *eaddr)
818{
819	/* NB: the unicast address appears to be loaded from EEPROM on reset */
820	eaddr[0] = RD4(sc, NPE_MAC_UNI_ADDR_1) & 0xff;
821	eaddr[1] = RD4(sc, NPE_MAC_UNI_ADDR_2) & 0xff;
822	eaddr[2] = RD4(sc, NPE_MAC_UNI_ADDR_3) & 0xff;
823	eaddr[3] = RD4(sc, NPE_MAC_UNI_ADDR_4) & 0xff;
824	eaddr[4] = RD4(sc, NPE_MAC_UNI_ADDR_5) & 0xff;
825	eaddr[5] = RD4(sc, NPE_MAC_UNI_ADDR_6) & 0xff;
826}
827
828struct txdone {
829	struct npebuf *head;
830	struct npebuf **tail;
831	int count;
832};
833
834static __inline void
835npe_txdone_finish(struct npe_softc *sc, const struct txdone *td)
836{
837	struct ifnet *ifp = sc->sc_ifp;
838
839	NPE_LOCK(sc);
840	*td->tail = sc->tx_free;
841	sc->tx_free = td->head;
842	/*
843	 * We're no longer busy, so clear the busy flag and call the
844	 * start routine to xmit more packets.
845	 */
846	ifp->if_opackets += td->count;
847	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
848	ifp->if_timer = 0;
849	npestart_locked(ifp);
850	NPE_UNLOCK(sc);
851}
852
853/*
854 * Q manager callback on tx done queue.  Reap mbufs
855 * and return tx buffers to the free list.  Finally
856 * restart output.  Note the microcode has only one
857 * txdone q wired into it so we must use the NPE ID
858 * returned with each npehwbuf to decide where to
859 * send buffers.
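 *
 * Each 32-bit q entry encodes the originating NPE id and port
 * together with the physical address of the npehwbuf (see the
 * NPE_QM_Q_NPE/PORT/ADDR macros); P2V maps that address back to
 * the corresponding software npebuf.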
860 */
861static void
862npe_txdone(int qid, void *arg)
863{
864#define	P2V(a, dma) \
865	&(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
866	struct npe_softc *sc0 = arg;
867	struct npe_softc *sc;
868	struct npebuf *npe;
869	struct txdone *td, q[NPE_MAX];
870	uint32_t entry;
871
872	/* XXX no NPE-A support */
873	q[NPE_B].tail = &q[NPE_B].head; q[NPE_B].count = 0;
874	q[NPE_C].tail = &q[NPE_C].head; q[NPE_C].count = 0;
875	/* XXX max # at a time? */
876	while (ixpqmgr_qread(qid, &entry) == 0) {
877		DPRINTF(sc0, "%s: entry 0x%x NPE %u port %u\n",
878		    __func__, entry, NPE_QM_Q_NPE(entry), NPE_QM_Q_PORT(entry));
879
880		sc = npes[NPE_QM_Q_NPE(entry)];
881		npe = P2V(NPE_QM_Q_ADDR(entry), &sc->txdma);
882		m_freem(npe->ix_m);
883		npe->ix_m = NULL;
884
885		td = &q[NPE_QM_Q_NPE(entry)];
886		*td->tail = npe;
887		td->tail = &npe->ix_next;
888		td->count++;
889	}
890
891	if (q[NPE_B].count)
892		npe_txdone_finish(npes[NPE_B], &q[NPE_B]);
893	if (q[NPE_C].count)
894		npe_txdone_finish(npes[NPE_C], &q[NPE_C]);
895#undef P2V
896}
897
898static int
899npe_rxbuf_init(struct npe_softc *sc, struct npebuf *npe, struct mbuf *m)
900{
901	bus_dma_segment_t segs[1];
902	struct npedma *dma = &sc->rxdma;
903	struct npehwbuf *hw;
904	int error, nseg;
905
906	if (m == NULL) {
907		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
908		if (m == NULL)
909			return ENOBUFS;
910	}
911	KASSERT(m->m_ext.ext_size >= 1536 + ETHER_ALIGN,
912		("ext_size %d", m->m_ext.ext_size));
913	m->m_pkthdr.len = m->m_len = 1536;
914	/* backload payload and align ip hdr */
915	m->m_data = m->m_ext.ext_buf + (m->m_ext.ext_size - (1536+ETHER_ALIGN));
916	error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map, m,
917			segs, &nseg, 0);
918	if (error != 0) {
919		m_freem(m);
920		return error;
921	}
922	hw = npe->ix_hw;
923	hw->ix_ne[0].data = htobe32(segs[0].ds_addr);
924	/* NB: NPE requires length be a multiple of 64 */
925	/* NB: buffer length goes in the upper 16 bits of the word */
926	hw->ix_ne[0].len = htobe32(segs[0].ds_len << 16);
927	hw->ix_ne[0].next = 0;
928	npe->ix_m = m;
929	/* Flush the memory in the mbuf */
930	bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREREAD);
931	return 0;
932}
933
934/*
935 * RX q processing for a specific NPE.  Claim entries
936 * from the hardware queue and pass the frames up the
937 * stack. Pass the rx buffers to the free list.
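 *
 * NB: the NPE writes the actual frame length into the low 16 bits
 * of ix_ne[0].len, and frames are delivered with the FCS still
 * attached (hence M_HASFCS) because the MAC is programmed with
 * NPE_RX_CNTRL1_CRC_EN in npeinit_locked.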
938 */
939static void
940npe_rxdone(int qid, void *arg)
941{
942#define	P2V(a, dma) \
943	&(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
944	struct npe_softc *sc = arg;
945	struct npedma *dma = &sc->rxdma;
946	uint32_t entry;
947
948	while (ixpqmgr_qread(qid, &entry) == 0) {
949		struct npebuf *npe = P2V(NPE_QM_Q_ADDR(entry), dma);
950		struct mbuf *m;
951
952		DPRINTF(sc, "%s: entry 0x%x neaddr 0x%x ne_len 0x%x\n",
953		    __func__, entry, npe->ix_neaddr, npe->ix_hw->ix_ne[0].len);
954		/*
955		 * Allocate a new mbuf to replenish the rx buffer.
956		 * If doing so fails we drop the rx'd frame so we
957		 * can reuse the previous mbuf.  When we're able to
958		 * allocate a new mbuf dispatch the mbuf w/ rx'd
959		 * data up the stack and replace it with the newly
960		 * allocated one.
961		 */
962		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
963		if (m != NULL) {
964			struct mbuf *mrx = npe->ix_m;
965			struct npehwbuf *hw = npe->ix_hw;
966			struct ifnet *ifp = sc->sc_ifp;
967
968			/* Flush mbuf memory for rx'd data */
969			bus_dmamap_sync(dma->mtag, npe->ix_map,
970			    BUS_DMASYNC_POSTREAD);
971
972			/* XXX flush hw buffer; works now 'cuz coherent */
973			/* set m_len etc. per rx frame size */
974			mrx->m_len = be32toh(hw->ix_ne[0].len) & 0xffff;
975			mrx->m_pkthdr.len = mrx->m_len;
976			mrx->m_pkthdr.rcvif = ifp;
977			mrx->m_flags |= M_HASFCS;
978
979			ifp->if_ipackets++;
980			ifp->if_input(ifp, mrx);
981		} else {
982			/* discard frame and re-use mbuf */
983			m = npe->ix_m;
984		}
985		if (npe_rxbuf_init(sc, npe, m) == 0) {
986			/* return npe buf to rx free list */
987			ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
988		} else {
989			/* XXX should not happen */
990		}
991	}
992#undef P2V
993}
994
995#ifdef DEVICE_POLLING
996static void
997npe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
998{
999	struct npe_softc *sc = ifp->if_softc;
1000
1001	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1002		npe_rxdone(sc->rx_qid, sc);
1003		npe_txdone(sc->tx_doneqid, sc);	/* XXX polls both NPE's */
1004	}
1005}
1006#endif /* DEVICE_POLLING */
1007
1008static void
1009npe_startxmit(struct npe_softc *sc)
1010{
1011	struct npedma *dma = &sc->txdma;
1012	int i;
1013
1014	NPE_ASSERT_LOCKED(sc);
1015	sc->tx_free = NULL;
1016	for (i = 0; i < dma->nbuf; i++) {
1017		struct npebuf *npe = &dma->buf[i];
1018		if (npe->ix_m != NULL) {
1019			/* NB: should not happen */
1020			device_printf(sc->sc_dev,
1021			    "%s: free mbuf at entry %u\n", __func__, i);
1022			m_freem(npe->ix_m);
1023		}
1024		npe->ix_m = NULL;
1025		npe->ix_next = sc->tx_free;
1026		sc->tx_free = npe;
1027	}
1028}
1029
1030static void
1031npe_startrecv(struct npe_softc *sc)
1032{
1033	struct npedma *dma = &sc->rxdma;
1034	struct npebuf *npe;
1035	int i;
1036
1037	NPE_ASSERT_LOCKED(sc);
1038	for (i = 0; i < dma->nbuf; i++) {
1039		npe = &dma->buf[i];
1040		npe_rxbuf_init(sc, npe, npe->ix_m);
1041		/* set npe buf on rx free list */
1042		ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
1043	}
1044}
1045
1046/*
1047 * Reset and initialize the chip
1048 */
1049static void
1050npeinit_locked(void *xsc)
1051{
1052	struct npe_softc *sc = xsc;
1053	struct ifnet *ifp = sc->sc_ifp;
1054
1055	NPE_ASSERT_LOCKED(sc);
1056	if (ifp->if_drv_flags & IFF_DRV_RUNNING) return;	/*XXX*/
1057
1058	/*
1059	 * Reset MAC core.
1060	 */
1061	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
1062	DELAY(NPE_MAC_RESET_DELAY);
1063	/* configure MAC to generate MDC clock */
1064	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);
1065
1066	/* disable transmitter and receiver in the MAC */
1067 	WR4(sc, NPE_MAC_RX_CNTRL1,
1068	    RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
1069 	WR4(sc, NPE_MAC_TX_CNTRL1,
1070	    RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);
1071
1072	/*
1073	 * Set the MAC core registers.
1074	 */
1075	WR4(sc, NPE_MAC_INT_CLK_THRESH, 0x1);	/* clock ratio: for ixp4xx */
1076	WR4(sc, NPE_MAC_TX_CNTRL2,	0xf);	/* max retries */
1077	WR4(sc, NPE_MAC_RANDOM_SEED,	0x8);	/* LFSR back-off seed */
1078	/* thresholds determined by NPE firmware FS */
1079	WR4(sc, NPE_MAC_THRESH_P_EMPTY,	0x12);
1080	WR4(sc, NPE_MAC_THRESH_P_FULL,	0x30);
1081	WR4(sc, NPE_MAC_BUF_SIZE_TX,	0x8);	/* tx fifo threshold (bytes) */
1082	WR4(sc, NPE_MAC_TX_DEFER,	0x15);	/* for single deferral */
1083	WR4(sc, NPE_MAC_RX_DEFER,	0x16);	/* deferral on inter-frame gap*/
1084	WR4(sc, NPE_MAC_TX_TWO_DEFER_1,	0x8);	/* for 2-part deferral */
1085	WR4(sc, NPE_MAC_TX_TWO_DEFER_2,	0x7);	/* for 2-part deferral */
1086	WR4(sc, NPE_MAC_SLOT_TIME,	0x80);	/* assumes MII mode */
1087
1088	WR4(sc, NPE_MAC_TX_CNTRL1,
1089		  NPE_TX_CNTRL1_RETRY		/* retry failed xmits */
1090		| NPE_TX_CNTRL1_FCS_EN		/* append FCS */
1091		| NPE_TX_CNTRL1_2DEFER		/* 2-part deferral */
1092		| NPE_TX_CNTRL1_PAD_EN);	/* pad runt frames */
1093	/* XXX pad strip? */
1094	WR4(sc, NPE_MAC_RX_CNTRL1,
1095		  NPE_RX_CNTRL1_CRC_EN		/* include CRC/FCS */
1096		| NPE_RX_CNTRL1_PAUSE_EN);	/* ena pause frame handling */
1097	WR4(sc, NPE_MAC_RX_CNTRL2, 0);
1098
1099	npe_setmac(sc, IF_LLADDR(ifp));
1100	npe_setmcast(sc);
1101
1102	npe_startxmit(sc);
1103	npe_startrecv(sc);
1104
1105	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1106	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1107	ifp->if_timer = 0;		/* just in case */
1108
1109	/* enable transmitter and receiver in the MAC */
1110 	WR4(sc, NPE_MAC_RX_CNTRL1,
1111	    RD4(sc, NPE_MAC_RX_CNTRL1) | NPE_RX_CNTRL1_RX_EN);
1112 	WR4(sc, NPE_MAC_TX_CNTRL1,
1113	    RD4(sc, NPE_MAC_TX_CNTRL1) | NPE_TX_CNTRL1_TX_EN);
1114
1115	callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc);
1116}
1117
1118static void
1119npeinit(void *xsc)
1120{
1121	struct npe_softc *sc = xsc;
1122	NPE_LOCK(sc);
1123	npeinit_locked(sc);
1124	NPE_UNLOCK(sc);
1125}
1126
1127/*
1128 * Defragment an mbuf chain, returning at most maxfrags separate
1129 * mbufs+clusters.  If this is not possible, NULL is returned and
1130 * the original mbuf chain is left in its present (potentially
1131 * modified) state.  We use two techniques: collapsing consecutive
1132 * mbufs and replacing consecutive mbufs by a cluster.
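 *
 * NB: this mirrors the defrag helpers found in other drivers (e.g.
 * ath); it is called from npestart_locked when loading an mbuf
 * chain fails with EFBIG, i.e. the chain has more than NPE_MAXSEG
 * fragments.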
1133 */
1134static struct mbuf *
1135npe_defrag(struct mbuf *m0, int how, int maxfrags)
1136{
1137	struct mbuf *m, *n, *n2, **prev;
1138	u_int curfrags;
1139
1140	/*
1141	 * Calculate the current number of frags.
1142	 */
1143	curfrags = 0;
1144	for (m = m0; m != NULL; m = m->m_next)
1145		curfrags++;
1146	/*
1147	 * First, try to collapse mbufs.  Note that we always collapse
1148	 * towards the front so we don't need to deal with moving the
1149	 * pkthdr.  This may be suboptimal if the first mbuf has much
1150	 * less data than the following.
1151	 */
1152	m = m0;
1153again:
1154	for (;;) {
1155		n = m->m_next;
1156		if (n == NULL)
1157			break;
1158		if ((m->m_flags & M_RDONLY) == 0 &&
1159		    n->m_len < M_TRAILINGSPACE(m)) {
1160			bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
1161				n->m_len);
1162			m->m_len += n->m_len;
1163			m->m_next = n->m_next;
1164			m_free(n);
1165			if (--curfrags <= maxfrags)
1166				return m0;
1167		} else
1168			m = n;
1169	}
1170	KASSERT(maxfrags > 1,
1171		("maxfrags %u, but normal collapse failed", maxfrags));
1172	/*
1173	 * Collapse consecutive mbufs to a cluster.
1174	 */
1175	prev = &m0->m_next;		/* NB: not the first mbuf */
1176	while ((n = *prev) != NULL) {
1177		if ((n2 = n->m_next) != NULL &&
1178		    n->m_len + n2->m_len < MCLBYTES) {
1179			m = m_getcl(how, MT_DATA, 0);
1180			if (m == NULL)
1181				goto bad;
1182			bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
1183			bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
1184				n2->m_len);
1185			m->m_len = n->m_len + n2->m_len;
1186			m->m_next = n2->m_next;
1187			*prev = m;
1188			m_free(n);
1189			m_free(n2);
1190			if (--curfrags <= maxfrags)	/* +1 cl -2 mbufs */
1191				return m0;
1192			/*
1193			 * Still not there, try the normal collapse
1194			 * again before we allocate another cluster.
1195			 */
1196			goto again;
1197		}
1198		prev = &n->m_next;
1199	}
1200	/*
1201	 * No place where we can collapse to a cluster; punt.
1202	 * This can occur if, for example, you request 2 frags
1203	 * but the packet requires that both be clusters (we
1204	 * never reallocate the first mbuf to avoid moving the
1205	 * packet header).
1206	 */
1207bad:
1208	return NULL;
1209}
1210
1211/*
1212 * Dequeue packets and place on the h/w transmit queue.
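 *
 * Each dma segment fills one ix_ne[] entry of the npehwbuf: the
 * physical address and a length word holding (seg len << 16) | total
 * packet length (the total is carried only in the first entry), plus
 * a link to the physical address of the next entry; the last entry's
 * link is zeroed.  The physical address of the first entry is then
 * written to the tx q for the NPE to pick up.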
1213 */
1214static void
1215npestart_locked(struct ifnet *ifp)
1216{
1217	struct npe_softc *sc = ifp->if_softc;
1218	struct npebuf *npe;
1219	struct npehwbuf *hw;
1220	struct mbuf *m, *n;
1221	struct npedma *dma = &sc->txdma;
1222	bus_dma_segment_t segs[NPE_MAXSEG];
1223	int nseg, len, error, i;
1224	uint32_t next;
1225
1226	NPE_ASSERT_LOCKED(sc);
1227	/* XXX can this happen? */
1228	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
1229		return;
1230
1231	while (sc->tx_free != NULL) {
1232		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
1233		if (m == NULL) {
1234			/* XXX? */
1235			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1236			return;
1237		}
1238		npe = sc->tx_free;
1239		error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map,
1240		    m, segs, &nseg, 0);
1241		if (error == EFBIG) {
1242			n = npe_defrag(m, M_DONTWAIT, NPE_MAXSEG);
1243			if (n == NULL) {
1244				if_printf(ifp, "%s: too many fragments %u\n",
1245				    __func__, nseg);
1246				m_freem(m);
1247				return;	/* XXX? */
1248			}
1249			m = n;
1250			error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map,
1251			    m, segs, &nseg, 0);
1252		}
1253		if (error != 0 || nseg == 0) {
1254			if_printf(ifp, "%s: error %u nseg %u\n",
1255			    __func__, error, nseg);
1256			m_freem(m);
1257			return;	/* XXX? */
1258		}
1259		sc->tx_free = npe->ix_next;
1260
1261		bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREWRITE);
1262
1263		/*
1264		 * Tap off here if there is a bpf listener.
1265		 */
1266		BPF_MTAP(ifp, m);
1267
1268		npe->ix_m = m;
1269		hw = npe->ix_hw;
1270		len = m->m_pkthdr.len;
1271		next = npe->ix_neaddr + sizeof(hw->ix_ne[0]);
1272		for (i = 0; i < nseg; i++) {
1273			hw->ix_ne[i].data = htobe32(segs[i].ds_addr);
1274			hw->ix_ne[i].len = htobe32((segs[i].ds_len<<16) | len);
1275			hw->ix_ne[i].next = htobe32(next);
1276
1277			len = 0;		/* zero for segments > 1 */
1278			next += sizeof(hw->ix_ne[0]);
1279		}
1280		hw->ix_ne[i-1].next = 0;	/* zero last in chain */
1281		/* XXX flush descriptor instead of using uncached memory */
1282
1283		DPRINTF(sc, "%s: qwrite(%u, 0x%x) ne_data %x ne_len 0x%x\n",
1284		    __func__, sc->tx_qid, npe->ix_neaddr,
1285		    hw->ix_ne[0].data, hw->ix_ne[0].len);
1286		/* stick it on the tx q */
1287		/* XXX add vlan priority */
1288		ixpqmgr_qwrite(sc->tx_qid, npe->ix_neaddr);
1289
1290		ifp->if_timer = 5;
1291	}
1292	if (sc->tx_free == NULL)
1293		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1294}
1295
1296static void
1297npestart(struct ifnet *ifp)
1298{
1299	struct npe_softc *sc = ifp->if_softc;
1300	NPE_LOCK(sc);
1301	npestart_locked(ifp);
1302	NPE_UNLOCK(sc);
1303}
1304
1305static void
1306npe_stopxmit(struct npe_softc *sc)
1307{
1308	struct npedma *dma = &sc->txdma;
1309	int i;
1310
1311	NPE_ASSERT_LOCKED(sc);
1312
1313	/* XXX qmgr */
1314	for (i = 0; i < dma->nbuf; i++) {
1315		struct npebuf *npe = &dma->buf[i];
1316
1317		if (npe->ix_m != NULL) {
1318			bus_dmamap_unload(dma->mtag, npe->ix_map);
1319			m_freem(npe->ix_m);
1320			npe->ix_m = NULL;
1321		}
1322	}
1323}
1324
1325static void
1326npe_stoprecv(struct npe_softc *sc)
1327{
1328	struct npedma *dma = &sc->rxdma;
1329	int i;
1330
1331	NPE_ASSERT_LOCKED(sc);
1332
1333	/* XXX qmgr */
1334	for (i = 0; i < dma->nbuf; i++) {
1335		struct npebuf *npe = &dma->buf[i];
1336
1337		if (npe->ix_m != NULL) {
1338			bus_dmamap_unload(dma->mtag, npe->ix_map);
1339			m_freem(npe->ix_m);
1340			npe->ix_m = NULL;
1341		}
1342	}
1343}
1344
1345/*
1346 * Turn off interrupts, and stop the nic.
1347 */
1348static void
1349npestop(struct npe_softc *sc)
1350{
1351	struct ifnet *ifp = sc->sc_ifp;
1352
1353	/*  disable transmitter and receiver in the MAC  */
1354 	WR4(sc, NPE_MAC_RX_CNTRL1,
1355	    RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
1356 	WR4(sc, NPE_MAC_TX_CNTRL1,
1357	    RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);
1358
1359	ifp->if_timer = 0;
1360	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1361
1362	callout_stop(&sc->tick_ch);
1363
1364	npe_stopxmit(sc);
1365	npe_stoprecv(sc);
1366	/* XXX go into loopback & drain q's? */
1367	/* XXX but beware of disabling tx above */
1368
1369	/*
1370	 * The MAC core rx/tx disable may leave the MAC hardware in an
1371	 * unpredictable state. A hw reset is executed before resetting
1372	 * all the MAC parameters to a known value.
1373	 */
1374	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
1375	DELAY(NPE_MAC_RESET_DELAY);
1376	WR4(sc, NPE_MAC_INT_CLK_THRESH, NPE_MAC_INT_CLK_THRESH_DEFAULT);
1377	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);
1378}
1379
1380static void
1381npewatchdog(struct ifnet *ifp)
1382{
1383	struct npe_softc *sc = ifp->if_softc;
1384
1385	NPE_LOCK(sc);
1386	if_printf(ifp, "device timeout\n");
1387	ifp->if_oerrors++;
1388	npeinit_locked(sc);
1389	NPE_UNLOCK(sc);
1390}
1391
1392static int
1393npeioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1394{
1395	struct npe_softc *sc = ifp->if_softc;
1396 	struct mii_data *mii;
1397 	struct ifreq *ifr = (struct ifreq *)data;
1398	int error = 0;
1399#ifdef DEVICE_POLLING
1400	int mask;
1401#endif
1402
1403	switch (cmd) {
1404	case SIOCSIFFLAGS:
1405		NPE_LOCK(sc);
1406		if ((ifp->if_flags & IFF_UP) == 0 &&
1407		    ifp->if_drv_flags & IFF_DRV_RUNNING) {
1408			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1409			npestop(sc);
1410		} else {
1411			/* reinitialize card on any parameter change */
1412			npeinit_locked(sc);
1413		}
1414		NPE_UNLOCK(sc);
1415		break;
1416
1417	case SIOCADDMULTI:
1418	case SIOCDELMULTI:
1419		/* update multicast filter list. */
1420		NPE_LOCK(sc);
1421		npe_setmcast(sc);
1422		NPE_UNLOCK(sc);
1423		error = 0;
1424		break;
1425
1426  	case SIOCSIFMEDIA:
1427  	case SIOCGIFMEDIA:
1428 		mii = device_get_softc(sc->sc_mii);
1429 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1430  		break;
1431
1432#ifdef DEVICE_POLLING
1433	case SIOCSIFCAP:
1434		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
1435		if (mask & IFCAP_POLLING) {
1436			if (ifr->ifr_reqcap & IFCAP_POLLING) {
1437				error = ether_poll_register(npe_poll, ifp);
1438				if (error)
1439					return error;
1440				NPE_LOCK(sc);
1441				/* disable callbacks XXX txdone is shared */
1442				ixpqmgr_notify_disable(sc->rx_qid);
1443				ixpqmgr_notify_disable(sc->tx_doneqid);
1444				ifp->if_capenable |= IFCAP_POLLING;
1445				NPE_UNLOCK(sc);
1446			} else {
1447				error = ether_poll_deregister(ifp);
1448				/* NB: always enable qmgr callbacks */
1449				NPE_LOCK(sc);
1450				/* enable qmgr callbacks */
1451				ixpqmgr_notify_enable(sc->rx_qid,
1452				    IX_QMGR_Q_SOURCE_ID_NOT_E);
1453				ixpqmgr_notify_enable(sc->tx_doneqid,
1454				    IX_QMGR_Q_SOURCE_ID_NOT_E);
1455				ifp->if_capenable &= ~IFCAP_POLLING;
1456				NPE_UNLOCK(sc);
1457			}
1458		}
1459		break;
1460#endif
1461	default:
1462		error = ether_ioctl(ifp, cmd, data);
1463		break;
1464	}
1465	return error;
1466}
1467
1468/*
1469 * Setup a traffic class -> rx queue mapping.
1470 */
1471static int
1472npe_setrxqosentry(struct npe_softc *sc, int classix, int trafclass, int qid)
1473{
1474	int npeid = npeconfig[device_get_unit(sc->sc_dev)].npeid;
1475	uint32_t msg[2];
1476
1477	msg[0] = (NPE_SETRXQOSENTRY << 24) | (npeid << 20) | classix;
1478	msg[1] = (trafclass << 24) | (1 << 23) | (qid << 16) | (qid << 4);
1479	return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
1480}
1481
1482/*
1483 * Update and reset the statistics in the NPE.
1484 */
1485static int
1486npe_updatestats(struct npe_softc *sc)
1487{
1488	uint32_t msg[2];
1489
1490	msg[0] = NPE_RESETSTATS << NPE_MAC_MSGID_SHL;
1491	msg[1] = sc->sc_stats_phys;	/* physical address of stat block */
1492	return ixpnpe_sendmsg(sc->sc_npe, msg);		/* NB: no recv */
1493}
1494
1495#if 0
1496/*
1497 * Get the current statistics block.
1498 */
1499static int
1500npe_getstats(struct npe_softc *sc)
1501{
1502	uint32_t msg[2];
1503
1504	msg[0] = NPE_GETSTATS << NPE_MAC_MSGID_SHL;
1505	msg[1] = sc->sc_stats_phys;	/* physical address of stat block */
1506	return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
1507}
1508
1509/*
1510 * Query the image id of the loaded firmware.
1511 */
1512static uint32_t
1513npe_getimageid(struct npe_softc *sc)
1514{
1515	uint32_t msg[2];
1516
1517	msg[0] = NPE_GETSTATUS << NPE_MAC_MSGID_SHL;
1518	msg[1] = 0;
1519	return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg) == 0 ? msg[1] : 0;
1520}
1521
1522/*
1523 * Enable/disable loopback.
1524 */
1525static int
1526npe_setloopback(struct npe_softc *sc, int ena)
1527{
1528	uint32_t msg[2];
1529
1530	msg[0] = (NPE_SETLOOPBACK << NPE_MAC_MSGID_SHL) | (ena != 0);
1531	msg[1] = 0;
1532	return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
1533}
1534#endif
1535
1536static void
1537npe_child_detached(device_t dev, device_t child)
1538{
1539	struct npe_softc *sc;
1540
1541	sc = device_get_softc(dev);
1542	if (child == sc->sc_mii)
1543		sc->sc_mii = NULL;
1544}
1545
1546/*
1547 * MII bus support routines.
1548 *
1549 * NB: ixp425 has one PHY per NPE
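 *
 * The MDIO command and status "registers" are each spread over four
 * consecutive 32-bit locations holding one byte apiece;
 * npe_mii_mdio_read/write below assemble and split the 32-bit value.
 * A transaction writes the command word (phy address, register,
 * optional data, and the NPE_MII_WRITE/NPE_MII_GO bits) and then
 * polls until the GO bit clears.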
1550 */
1551static uint32_t
1552npe_mii_mdio_read(struct npe_softc *sc, int reg)
1553{
1554#define	MII_RD4(sc, reg)	bus_space_read_4(sc->sc_iot, sc->sc_miih, reg)
1555	uint32_t v;
1556
1557	/* NB: registers are known to be sequential */
1558	v =  (MII_RD4(sc, reg+0) & 0xff) << 0;
1559	v |= (MII_RD4(sc, reg+4) & 0xff) << 8;
1560	v |= (MII_RD4(sc, reg+8) & 0xff) << 16;
1561	v |= (MII_RD4(sc, reg+12) & 0xff) << 24;
1562	return v;
1563#undef MII_RD4
1564}
1565
1566static void
1567npe_mii_mdio_write(struct npe_softc *sc, int reg, uint32_t cmd)
1568{
1569#define	MII_WR4(sc, reg, v) \
1570	bus_space_write_4(sc->sc_iot, sc->sc_miih, reg, v)
1571
1572	/* NB: registers are known to be sequential */
1573	MII_WR4(sc, reg+0, cmd & 0xff);
1574	MII_WR4(sc, reg+4, (cmd >> 8) & 0xff);
1575	MII_WR4(sc, reg+8, (cmd >> 16) & 0xff);
1576	MII_WR4(sc, reg+12, (cmd >> 24) & 0xff);
1577#undef MII_WR4
1578}
1579
1580static int
1581npe_mii_mdio_wait(struct npe_softc *sc)
1582{
1583#define	MAXTRIES	100	/* XXX */
1584	uint32_t v;
1585	int i;
1586
1587	for (i = 0; i < MAXTRIES; i++) {
1588		v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_CMD);
1589		if ((v & NPE_MII_GO) == 0)
1590			return 1;
1591	}
1592	return 0;		/* NB: timeout */
1593#undef MAXTRIES
1594}
1595
1596static int
1597npe_miibus_readreg(device_t dev, int phy, int reg)
1598{
1599	struct npe_softc *sc = device_get_softc(dev);
1600	uint32_t v;
1601
1602	if (phy != device_get_unit(dev))	/* XXX */
1603		return 0xffff;
1604	v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL)
1605	  | NPE_MII_GO;
1606	npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
1607	if (npe_mii_mdio_wait(sc))
1608		v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_STS);
1609	else
1610		v = 0xffff | NPE_MII_READ_FAIL;
1611	return (v & NPE_MII_READ_FAIL) ? 0xffff : (v & 0xffff);
1613}
1614
1615static void
1616npe_miibus_writereg(device_t dev, int phy, int reg, int data)
1617{
1618	struct npe_softc *sc = device_get_softc(dev);
1619	uint32_t v;
1620
1621	if (phy != device_get_unit(dev))	/* XXX */
1622		return;
1623	v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL)
1624	  | data | NPE_MII_WRITE
1625	  | NPE_MII_GO;
1626	npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
1627	/* XXX complain about timeout */
1628	(void) npe_mii_mdio_wait(sc);
1629}
1630
1631static void
1632npe_miibus_statchg(device_t dev)
1633{
1634	struct npe_softc *sc = device_get_softc(dev);
1635	struct mii_data *mii = device_get_softc(sc->sc_mii);
1636	uint32_t tx1, rx1;
1637
1638	/* sync MAC duplex state */
1639	tx1 = RD4(sc, NPE_MAC_TX_CNTRL1);
1640	rx1 = RD4(sc, NPE_MAC_RX_CNTRL1);
1641	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
1642		tx1 &= ~NPE_TX_CNTRL1_DUPLEX;
1643		rx1 |= NPE_RX_CNTRL1_PAUSE_EN;
1644	} else {
1645		tx1 |= NPE_TX_CNTRL1_DUPLEX;
1646		rx1 &= ~NPE_RX_CNTRL1_PAUSE_EN;
1647	}
1648	WR4(sc, NPE_MAC_RX_CNTRL1, rx1);
1649	WR4(sc, NPE_MAC_TX_CNTRL1, tx1);
1650}
1651
1652static device_method_t npe_methods[] = {
1653	/* Device interface */
1654	DEVMETHOD(device_probe,		npe_probe),
1655	DEVMETHOD(device_attach,	npe_attach),
1656	DEVMETHOD(device_detach,	npe_detach),
1657
1658	/* Bus interface */
1659	DEVMETHOD(bus_child_detached,	npe_child_detached),
1660
1661	/* MII interface */
1662	DEVMETHOD(miibus_readreg,	npe_miibus_readreg),
1663	DEVMETHOD(miibus_writereg,	npe_miibus_writereg),
1664	DEVMETHOD(miibus_statchg,	npe_miibus_statchg),
1665
1666	{ 0, 0 }
1667};
1668
1669static driver_t npe_driver = {
1670	"npe",
1671	npe_methods,
1672	sizeof(struct npe_softc),
1673};
1674
1675DRIVER_MODULE(npe, ixp, npe_driver, npe_devclass, 0, 0);
1676DRIVER_MODULE(miibus, npe, miibus_driver, miibus_devclass, 0, 0);
1677MODULE_DEPEND(npe, ixpqmgr, 1, 1, 1);
1678MODULE_DEPEND(npe, miibus, 1, 1, 1);
1679MODULE_DEPEND(npe, ether, 1, 1, 1);
1680