/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2006-2008 Sam Leffler.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/arm/xscale/ixp425/if_npe.c 330897 2018-03-14 03:19:51Z eadler $");

/*
 * Intel XScale NPE Ethernet driver.
 *
 * This driver handles the two ports present on the IXP425.
 * Packet processing is done by the Network Processing Engines
 * (NPEs) that work together with a MAC and PHY. The MAC
 * is also mapped to the XScale cpu; the PHY is accessed via
 * the MAC. NPE-XScale communication happens through h/w
 * queues managed by the Q Manager block.
 *
 * The code here replaces the ethAcc, ethMii, and ethDB classes
 * in the Intel Access Library (IAL) and the OS-specific driver.
 *
 * XXX add vlan support
 */
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <machine/bus.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_mib.h>
#include <net/if_types.h>
#include <net/if_var.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <arm/xscale/ixp425/ixp425reg.h>
#include <arm/xscale/ixp425/ixp425var.h>
#include <arm/xscale/ixp425/ixp425_qmgr.h>
#include <arm/xscale/ixp425/ixp425_npevar.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <arm/xscale/ixp425/if_npereg.h>

#include <machine/armreg.h>

#include "miibus_if.h"

/*
 * XXX: For the main bus dma tag. Can go away if the new method to get the
 * dma tag from the parent got MFC'd into RELENG_6.
 */
extern struct ixp425_softc *ixp425_softc;

struct npebuf {
	struct npebuf	*ix_next;	/* chain to next buffer */
	void		*ix_m;		/* backpointer to mbuf */
	bus_dmamap_t	ix_map;		/* bus dma map for associated data */
	struct npehwbuf	*ix_hw;		/* associated h/w block */
	uint32_t	ix_neaddr;	/* phys address of ix_hw */
};

struct npedma {
	const char	*name;
	int		nbuf;		/* # npebuf's allocated */
	bus_dma_tag_t	mtag;		/* bus dma tag for mbuf data */
	struct npehwbuf	*hwbuf;		/* NPE h/w buffers */
	bus_dma_tag_t	buf_tag;	/* tag+map for NPE buffers */
	bus_dmamap_t	buf_map;
	bus_addr_t	buf_phys;	/* phys addr of buffers */
	struct npebuf	*buf;		/* s/w buffers (1-1 w/ h/w) */
};

struct npe_softc {
	/* XXX mii requires this be first; do not move! */
	struct ifnet	*sc_ifp;	/* ifnet pointer */
	struct mtx	sc_mtx;		/* basically a perimeter lock */
	device_t	sc_dev;
	bus_space_tag_t	sc_iot;
	bus_space_handle_t sc_ioh;	/* MAC register window */
	device_t	sc_mii;		/* child miibus */
	bus_space_handle_t sc_miih;	/* MII register window */
	int		sc_npeid;
	struct ixpnpe_softc *sc_npe;	/* NPE support */
	int		sc_debug;	/* DPRINTF* control */
	int		sc_tickinterval;
	struct callout	tick_ch;	/* Tick callout */
	int		npe_watchdog_timer;
	struct npedma	txdma;
	struct npebuf	*tx_free;	/* list of free tx buffers */
	struct npedma	rxdma;
	bus_addr_t	buf_phys;	/* XXX for returning a value */
	int		rx_qid;		/* rx qid */
	int		rx_freeqid;	/* rx free buffers qid */
	int		tx_qid;		/* tx qid */
	int		tx_doneqid;	/* tx completed qid */
	struct ifmib_iso_8802_3 mibdata;
	bus_dma_tag_t	sc_stats_tag;	/* bus dma tag for stats block */
	struct npestats	*sc_stats;
	bus_dmamap_t	sc_stats_map;
	bus_addr_t	sc_stats_phys;	/* phys addr of sc_stats */
	struct npestats	sc_totals;	/* accumulated sc_stats */
};

/*
 * Static configuration for IXP425.  The tx and
 * rx free Q id's are fixed by the NPE microcode.  The
 * rx Q id's are programmed to be separate to simplify
 * multi-port processing.  It may be better to handle
 * all traffic through one Q (as done by the Intel drivers).
 *
 * Note that the PHYs are accessible only from MAC B on the
 * IXP425 and from MAC C on other devices.  This and other
 * platform-specific assumptions are handled with hints.
 */
static const struct {
	uint32_t	macbase;
	uint32_t	miibase;
	int		phy;		/* phy id */
	uint8_t		rx_qid;
	uint8_t		rx_freeqid;
	uint8_t		tx_qid;
	uint8_t		tx_doneqid;
} npeconfig[NPE_MAX] = {
	[NPE_A] = {
	  .macbase	= IXP435_MAC_A_HWBASE,
	  .miibase	= IXP425_MAC_C_HWBASE,
	  .phy		= 2,
	  .rx_qid	= 4,
	  .rx_freeqid	= 26,
	  .tx_qid	= 23,
	  .tx_doneqid	= 31
	},
	[NPE_B] = {
	  .macbase	= IXP425_MAC_B_HWBASE,
	  .miibase	= IXP425_MAC_B_HWBASE,
	  .phy		= 0,
	  .rx_qid	= 4,
	  .rx_freeqid	= 27,
	  .tx_qid	= 24,
	  .tx_doneqid	= 31
	},
	[NPE_C] = {
	  .macbase	= IXP425_MAC_C_HWBASE,
	  .miibase	= IXP425_MAC_B_HWBASE,
	  .phy		= 1,
	  .rx_qid	= 12,
	  .rx_freeqid	= 28,
	  .tx_qid	= 25,
	  .tx_doneqid	= 31
	},
};
static struct npe_softc *npes[NPE_MAX];	/* NB: indexed by npeid */

static __inline uint32_t
RD4(struct npe_softc *sc, bus_size_t off)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
}

static __inline void
WR4(struct npe_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
}

#define	NPE_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define	NPE_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define	NPE_LOCK_INIT(_sc) \
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->sc_dev), \
	    MTX_NETWORK_LOCK, MTX_DEF)
#define	NPE_LOCK_DESTROY(_sc)	mtx_destroy(&_sc->sc_mtx);
#define	NPE_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define	NPE_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);

static devclass_t npe_devclass;

static int	override_npeid(device_t, const char *resname, int *val);
static int	npe_activate(device_t dev);
static void	npe_deactivate(device_t dev);
static int	npe_ifmedia_update(struct ifnet *ifp);
static void	npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static void	npe_setmac(struct npe_softc *sc, u_char *eaddr);
static void	npe_getmac(struct npe_softc *sc, u_char *eaddr);
static void	npe_txdone(int qid, void *arg);
static int	npe_rxbuf_init(struct npe_softc *, struct npebuf *,
			struct mbuf *);
static int	npe_rxdone(int qid, void *arg);
static void	npeinit(void *);
static void	npestart_locked(struct ifnet *);
static void	npestart(struct ifnet *);
static void	npestop(struct npe_softc *);
static void	npewatchdog(struct npe_softc *);
static int	npeioctl(struct ifnet * ifp, u_long, caddr_t);

static int	npe_setrxqosentry(struct npe_softc *, int classix,
			int trafclass, int qid);
static int	npe_setportaddress(struct npe_softc *, const uint8_t mac[]);
static int	npe_setfirewallmode(struct npe_softc *, int onoff);
static int	npe_updatestats(struct npe_softc *);
#if 0
static int	npe_getstats(struct npe_softc *);
static uint32_t	npe_getimageid(struct npe_softc *);
static int	npe_setloopback(struct npe_softc *, int ena);
#endif

/* NB: all tx done processing goes through one queue */
static int tx_doneqid = -1;

static SYSCTL_NODE(_hw, OID_AUTO, npe, CTLFLAG_RD, 0,
    "IXP4XX NPE driver parameters");

static int npe_debug = 0;
SYSCTL_INT(_hw_npe, OID_AUTO, debug, CTLFLAG_RWTUN, &npe_debug,
	   0, "IXP4XX NPE network interface debug msgs");
#define	DPRINTF(sc, fmt, ...) do {					\
	if (sc->sc_debug) device_printf(sc->sc_dev, fmt, __VA_ARGS__);	\
} while (0)
#define	DPRINTFn(n, sc, fmt, ...) do {					\
	if (sc->sc_debug >= n) device_printf(sc->sc_dev, fmt, __VA_ARGS__);\
} while (0)
static int npe_tickinterval = 3;		/* npe_tick frequency (secs) */
SYSCTL_INT(_hw_npe, OID_AUTO, tickinterval, CTLFLAG_RDTUN, &npe_tickinterval,
	    0, "periodic work interval (secs)");

static	int npe_rxbuf = 64;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_npe, OID_AUTO, rxbuf, CTLFLAG_RDTUN, &npe_rxbuf,
	    0, "rx buffers allocated");
static	int npe_txbuf = 128;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_npe, OID_AUTO, txbuf, CTLFLAG_RDTUN, &npe_txbuf,
	    0, "tx buffers allocated");

static int
unit2npeid(int unit)
{
	static const int npeidmap[2][3] = {
		/* on 425 A is for HSS, B & C are for Ethernet */
		{ NPE_B, NPE_C, -1 },	/* IXP425 */
		/* 435 only has A & C, order C then A */
		{ NPE_C, NPE_A, -1 },	/* IXP435 */
	};
	/* XXX check feature register instead */
	return (unit < 3 ? npeidmap[
	    (cpu_ident() & CPU_ID_CPU_MASK) == CPU_ID_IXP435][unit] : -1);
}

static int
npe_probe(device_t dev)
{
	static const char *desc[NPE_MAX] = {
		[NPE_A] = "IXP NPE-A",
		[NPE_B] = "IXP NPE-B",
		[NPE_C] = "IXP NPE-C"
	};
	int unit = device_get_unit(dev);
	int npeid;

	if (unit > 2 ||
	    (ixp4xx_read_feature_bits() &
	     (unit == 0 ? EXP_FCTRL_ETH0 : EXP_FCTRL_ETH1)) == 0)
		return EINVAL;

	npeid = -1;
	if (!override_npeid(dev, "npeid", &npeid))
		npeid = unit2npeid(unit);
	if (npeid == -1) {
		device_printf(dev, "unit %d not supported\n", unit);
		return EINVAL;
	}
	device_set_desc(dev, desc[npeid]);
	return 0;
}

static int
npe_attach(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	struct ixp425_softc *sa = device_get_softc(device_get_parent(dev));
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct ifnet *ifp;
	int error;
	u_char eaddr[6];

	sc->sc_dev = dev;
	sc->sc_iot = sa->sc_iot;
	NPE_LOCK_INIT(sc);
	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
	sc->sc_debug = npe_debug;
	sc->sc_tickinterval = npe_tickinterval;

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet\n");
		error = EIO;		/* XXX */
		goto out;
	}
	/* NB: must be set up prior to invoking mii code */
	sc->sc_ifp = ifp;

	error = npe_activate(dev);
	if (error) {
		device_printf(dev, "cannot activate npe\n");
		goto out;
	}

	npe_getmac(sc, eaddr);

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = npestart;
	ifp->if_ioctl = npeioctl;
	ifp->if_init = npeinit;
	IFQ_SET_MAXLEN(&ifp->if_snd, sc->txdma.nbuf - 1);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_linkmib = &sc->mibdata;
	ifp->if_linkmiblen = sizeof(sc->mibdata);
	sc->mibdata.dot3Compliance = DOT3COMPLIANCE_STATS;
	/* device supports oversized vlan frames */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tickinterval",
	    CTLFLAG_RW, &sc->sc_tickinterval, 0, "periodic work frequency");
	SYSCTL_ADD_STRUCT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "stats",
	    CTLFLAG_RD, &sc->sc_totals, npestats, "onboard stats");

	ether_ifattach(ifp, eaddr);
	return 0;
out:
	if (ifp != NULL)
		if_free(ifp);
	NPE_LOCK_DESTROY(sc);
	npe_deactivate(dev);
	return error;
}

static int
npe_detach(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = sc->sc_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	npestop(sc);
	if (ifp != NULL) {
		ether_ifdetach(ifp);
		if_free(ifp);
	}
	NPE_LOCK_DESTROY(sc);
	npe_deactivate(dev);
	return 0;
}

/*
 * Compute and install the multicast filter.
 */
static void
npe_setmcast(struct npe_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	uint8_t mask[ETHER_ADDR_LEN], addr[ETHER_ADDR_LEN];
	int i;

	if (ifp->if_flags & IFF_PROMISC) {
		memset(mask, 0, ETHER_ADDR_LEN);
		memset(addr, 0, ETHER_ADDR_LEN);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		static const uint8_t allmulti[ETHER_ADDR_LEN] =
		    { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
		memcpy(mask, allmulti, ETHER_ADDR_LEN);
		memcpy(addr, allmulti, ETHER_ADDR_LEN);
	} else {
		uint8_t clr[ETHER_ADDR_LEN], set[ETHER_ADDR_LEN];
		struct ifmultiaddr *ifma;
		const uint8_t *mac;

		memset(clr, 0, ETHER_ADDR_LEN);
		memset(set, 0xff, ETHER_ADDR_LEN);

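		/*
		 * Derive a single address/mask pair covering the whole
		 * multicast list: "clr" accumulates the union of address
		 * bits, "set" the intersection.  A mask bit is set only
		 * where every address agrees; the rest are don't-cares.
		 */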
		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			mac = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				clr[i] |= mac[i];
				set[i] &= mac[i];
			}
		}
		if_maddr_runlock(ifp);

		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			mask[i] = set[i] | ~clr[i];
			addr[i] = set[i];
		}
	}

	/*
	 * Write the mask and address registers.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		WR4(sc, NPE_MAC_ADDR_MASK(i), mask[i]);
		WR4(sc, NPE_MAC_ADDR(i), addr[i]);
	}
}

static void
npe_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct npe_softc *sc;

	if (error != 0)
		return;
	sc = (struct npe_softc *)arg;
	sc->buf_phys = segs[0].ds_addr;
}

static int
npe_dma_setup(struct npe_softc *sc, struct npedma *dma,
	const char *name, int nbuf, int maxseg)
{
	int error, i;

	memset(dma, 0, sizeof(*dma));

	dma->name = name;
	dma->nbuf = nbuf;

	/* DMA tag for mapped mbufs */
	error = bus_dma_tag_create(ixp425_softc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES, maxseg, MCLBYTES, 0,
	    busdma_lock_mutex, &sc->sc_mtx, &dma->mtag);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to create %s mbuf dma tag, "
		     "error %u\n", dma->name, error);
		return error;
	}

	/* DMA tag and map for the NPE buffers */
	error = bus_dma_tag_create(ixp425_softc->sc_dmat, sizeof(uint32_t), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    nbuf * sizeof(struct npehwbuf), 1,
	    nbuf * sizeof(struct npehwbuf), 0,
	    busdma_lock_mutex, &sc->sc_mtx, &dma->buf_tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to create %s npebuf dma tag, error %u\n",
		    dma->name, error);
		return error;
	}
	error = bus_dmamem_alloc(dma->buf_tag, (void **)&dma->hwbuf,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->buf_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to allocate memory for %s h/w buffers, error %u\n",
		    dma->name, error);
		return error;
	}
	/* XXX M_TEMP */
	dma->buf = malloc(nbuf * sizeof(struct npebuf), M_TEMP,
	    M_NOWAIT | M_ZERO);
	if (dma->buf == NULL) {
		device_printf(sc->sc_dev,
		    "unable to allocate memory for %s s/w buffers\n",
		    dma->name);
		return ENOMEM;
	}
	error = bus_dmamap_load(dma->buf_tag, dma->buf_map, dma->hwbuf,
	    nbuf * sizeof(struct npehwbuf), npe_getaddr, sc, 0);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to map memory for %s h/w buffers, error %u\n",
		    dma->name, error);
		return error;
	}
	dma->buf_phys = sc->buf_phys;
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];
		struct npehwbuf *hw = &dma->hwbuf[i];

		/* calculate offset to shared area */
		npe->ix_neaddr = dma->buf_phys +
			((uintptr_t)hw - (uintptr_t)dma->hwbuf);
		KASSERT((npe->ix_neaddr & 0x1f) == 0,
		    ("ixpbuf misaligned, PA 0x%x", npe->ix_neaddr));
		error = bus_dmamap_create(dma->mtag, BUS_DMA_NOWAIT,
				&npe->ix_map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			     "unable to create dmamap for %s buffer %u, "
			     "error %u\n", dma->name, i, error);
			return error;
		}
		npe->ix_hw = hw;
	}
	bus_dmamap_sync(dma->buf_tag, dma->buf_map, BUS_DMASYNC_PREWRITE);
	return 0;
}

static void
npe_dma_destroy(struct npe_softc *sc, struct npedma *dma)
{
	int i;

	if (dma->hwbuf != NULL) {
		for (i = 0; i < dma->nbuf; i++) {
			struct npebuf *npe = &dma->buf[i];
			bus_dmamap_destroy(dma->mtag, npe->ix_map);
		}
		bus_dmamap_unload(dma->buf_tag, dma->buf_map);
		bus_dmamem_free(dma->buf_tag, dma->hwbuf, dma->buf_map);
	}
	if (dma->buf != NULL)
		free(dma->buf, M_TEMP);
	if (dma->buf_tag)
		bus_dma_tag_destroy(dma->buf_tag);
	if (dma->mtag)
		bus_dma_tag_destroy(dma->mtag);
	memset(dma, 0, sizeof(*dma));
}

static int
override_addr(device_t dev, const char *resname, int *base)
{
	int unit = device_get_unit(dev);
	const char *resval;

	/* XXX warn for wrong hint type */
	if (resource_string_value("npe", unit, resname, &resval) != 0)
		return 0;
	switch (resval[0]) {
	case 'A':
		*base = IXP435_MAC_A_HWBASE;
		break;
	case 'B':
		*base = IXP425_MAC_B_HWBASE;
		break;
	case 'C':
		*base = IXP425_MAC_C_HWBASE;
		break;
	default:
		device_printf(dev, "Warning, bad value %s for "
		    "npe.%d.%s ignored\n", resval, unit, resname);
		return 0;
	}
	if (bootverbose)
		device_printf(dev, "using npe.%d.%s=%s override\n",
		    unit, resname, resval);
	return 1;
}

static int
override_npeid(device_t dev, const char *resname, int *npeid)
{
	int unit = device_get_unit(dev);
	const char *resval;

	/* XXX warn for wrong hint type */
	if (resource_string_value("npe", unit, resname, &resval) != 0)
		return 0;
	switch (resval[0]) {
	case 'A': *npeid = NPE_A; break;
	case 'B': *npeid = NPE_B; break;
	case 'C': *npeid = NPE_C; break;
	default:
		device_printf(dev, "Warning, bad value %s for "
		    "npe.%d.%s ignored\n", resval, unit, resname);
		return 0;
	}
	if (bootverbose)
		device_printf(dev, "using npe.%d.%s=%s override\n",
		    unit, resname, resval);
	return 1;
}

static int
override_unit(device_t dev, const char *resname, int *val, int min, int max)
{
	int unit = device_get_unit(dev);
	int resval;

	if (resource_int_value("npe", unit, resname, &resval) != 0)
		return 0;
	if (!(min <= resval && resval <= max)) {
		device_printf(dev, "Warning, bad value %d for npe.%d.%s "
		    "ignored (value must be [%d-%d])\n", resval, unit,
		    resname, min, max);
		return 0;
	}
	if (bootverbose)
		device_printf(dev, "using npe.%d.%s=%d override\n",
		    unit, resname, resval);
	*val = resval;
	return 1;
}

static void
npe_mac_reset(struct npe_softc *sc)
{
	/*
	 * Reset MAC core.
	 */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
	DELAY(NPE_MAC_RESET_DELAY);
	/* configure MAC to generate MDC clock */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);
}

static int
npe_activate(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	int error, i, macbase, miibase, phy;

	/*
	 * Set up NPE ID, MAC, and MII bindings.  We allow override
	 * via hints to handle unexpected board configs.
	 */
	if (!override_npeid(dev, "npeid", &sc->sc_npeid))
		sc->sc_npeid = unit2npeid(device_get_unit(dev));
	sc->sc_npe = ixpnpe_attach(dev, sc->sc_npeid);
	if (sc->sc_npe == NULL) {
		device_printf(dev, "cannot attach ixpnpe\n");
		return EIO;		/* XXX */
	}

	/* MAC */
	if (!override_addr(dev, "mac", &macbase))
		macbase = npeconfig[sc->sc_npeid].macbase;
	if (bootverbose)
		device_printf(sc->sc_dev, "MAC at 0x%x\n", macbase);
	if (bus_space_map(sc->sc_iot, macbase, IXP425_REG_SIZE, 0,
	    &sc->sc_ioh)) {
		device_printf(dev, "cannot map mac registers 0x%x:0x%x\n",
		    macbase, IXP425_REG_SIZE);
		return ENOMEM;
	}

	/* PHY */
	if (!override_unit(dev, "phy", &phy, 0, MII_NPHY - 1))
		phy = npeconfig[sc->sc_npeid].phy;
	if (!override_addr(dev, "mii", &miibase))
		miibase = npeconfig[sc->sc_npeid].miibase;
	if (bootverbose)
		device_printf(sc->sc_dev, "MII at 0x%x\n", miibase);
	if (miibase != macbase) {
		/*
		 * PHY is mapped through a different MAC, set up an
		 * additional mapping for frobbing the PHY registers.
		 */
		if (bus_space_map(sc->sc_iot, miibase, IXP425_REG_SIZE, 0,
		    &sc->sc_miih)) {
			device_printf(dev,
			    "cannot map MII registers 0x%x:0x%x\n",
			    miibase, IXP425_REG_SIZE);
			return ENOMEM;
		}
	} else
		sc->sc_miih = sc->sc_ioh;

	/*
	 * Load NPE firmware and start it running.
	 */
	error = ixpnpe_init(sc->sc_npe);
	if (error != 0) {
		device_printf(dev, "cannot init NPE (error %d)\n", error);
		return error;
	}

	/* attach PHY */
	error = mii_attach(dev, &sc->sc_mii, sc->sc_ifp, npe_ifmedia_update,
	    npe_ifmedia_status, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		return error;
	}

	error = npe_dma_setup(sc, &sc->txdma, "tx", npe_txbuf, NPE_MAXSEG);
	if (error != 0)
		return error;
	error = npe_dma_setup(sc, &sc->rxdma, "rx", npe_rxbuf, 1);
	if (error != 0)
		return error;

	/* setup statistics block */
	error = bus_dma_tag_create(ixp425_softc->sc_dmat, sizeof(uint32_t), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct npestats), 1, sizeof(struct npestats), 0,
	    busdma_lock_mutex, &sc->sc_mtx, &sc->sc_stats_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to create stats tag, "
		     "error %u\n", error);
		return error;
	}
	error = bus_dmamem_alloc(sc->sc_stats_tag, (void **)&sc->sc_stats,
	    BUS_DMA_NOWAIT, &sc->sc_stats_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to allocate memory for stats block, error %u\n",
		    error);
		return error;
	}
	error = bus_dmamap_load(sc->sc_stats_tag, sc->sc_stats_map,
	    sc->sc_stats, sizeof(struct npestats), npe_getaddr, sc, 0);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to load memory for stats block, error %u\n",
		    error);
		return error;
	}
	sc->sc_stats_phys = sc->buf_phys;

	/*
	 * Set up h/w rx/tx queues.  There are four q's:
	 *   rx		inbound q of rx'd frames
	 *   rx_free	pool of ixpbuf's for receiving frames
	 *   tx		outbound q of frames to send
	 *   tx_done	q of tx frames that have been processed
	 *
	 * The NPE handles the actual tx/rx process and the q manager
	 * handles the queues.  The driver just writes entries to the
	 * q manager mailboxes and gets callbacks when there are rx'd
	 * frames to process or tx'd frames to reap.  These callbacks
	 * are controlled by the q configurations; e.g. we get a
	 * callback when tx_done has 2 or more frames to process and
	 * when the rx q has at least one frame.  These settings can
	 * be changed at the time the q is configured.
	 */
	sc->rx_qid = npeconfig[sc->sc_npeid].rx_qid;
	ixpqmgr_qconfig(sc->rx_qid, npe_rxbuf, 0, 1,
		IX_QMGR_Q_SOURCE_ID_NOT_E, (qconfig_hand_t *)npe_rxdone, sc);
	sc->rx_freeqid = npeconfig[sc->sc_npeid].rx_freeqid;
	ixpqmgr_qconfig(sc->rx_freeqid, npe_rxbuf, 0, npe_rxbuf/2, 0, NULL, sc);
	/*
	 * Set up the NPE to direct all traffic to rx_qid.
	 * When QoS is enabled in the firmware there are
	 * 8 traffic classes; otherwise just 4.
	 */
	for (i = 0; i < 8; i++)
		npe_setrxqosentry(sc, i, 0, sc->rx_qid);

	/* disable firewall mode just in case (should be off) */
	npe_setfirewallmode(sc, 0);

	sc->tx_qid = npeconfig[sc->sc_npeid].tx_qid;
	sc->tx_doneqid = npeconfig[sc->sc_npeid].tx_doneqid;
	ixpqmgr_qconfig(sc->tx_qid, npe_txbuf, 0, npe_txbuf, 0, NULL, sc);
	if (tx_doneqid == -1) {
		ixpqmgr_qconfig(sc->tx_doneqid, npe_txbuf, 0, 2,
			IX_QMGR_Q_SOURCE_ID_NOT_E, npe_txdone, sc);
		tx_doneqid = sc->tx_doneqid;
	}

	KASSERT(npes[sc->sc_npeid] == NULL,
	    ("npe %u already setup", sc->sc_npeid));
	npes[sc->sc_npeid] = sc;

	return 0;
}

static void
npe_deactivate(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);

	npes[sc->sc_npeid] = NULL;

	/* XXX disable q's */
	if (sc->sc_npe != NULL) {
		ixpnpe_stop(sc->sc_npe);
		ixpnpe_detach(sc->sc_npe);
	}
	if (sc->sc_stats != NULL) {
		bus_dmamap_unload(sc->sc_stats_tag, sc->sc_stats_map);
		bus_dmamem_free(sc->sc_stats_tag, sc->sc_stats,
			sc->sc_stats_map);
	}
	if (sc->sc_stats_tag != NULL)
		bus_dma_tag_destroy(sc->sc_stats_tag);
	npe_dma_destroy(sc, &sc->txdma);
	npe_dma_destroy(sc, &sc->rxdma);
	bus_generic_detach(sc->sc_dev);
	if (sc->sc_mii != NULL)
		device_delete_child(sc->sc_dev, sc->sc_mii);
}

/*
 * Change media according to request.
 */
static int
npe_ifmedia_update(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->sc_mii);
	NPE_LOCK(sc);
	mii_mediachg(mii);
	/* XXX push state ourself? */
	NPE_UNLOCK(sc);
	return (0);
}

/*
 * Notify the world which media we're using.
 */
static void
npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct npe_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->sc_mii);
	NPE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	NPE_UNLOCK(sc);
}

static void
npe_addstats(struct npe_softc *sc)
{
#define	NPEADD(x)	sc->sc_totals.x += be32toh(ns->x)
#define	MIBADD(x) do { sc->mibdata.x += be32toh(ns->x); NPEADD(x); } while (0)
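	/*
	 * NB: NPEADD folds a firmware counter into the accumulated
	 * totals; MIBADD additionally credits the 802.3 MIB counters.
	 */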
	struct ifnet *ifp = sc->sc_ifp;
	struct npestats *ns = sc->sc_stats;

	MIBADD(dot3StatsAlignmentErrors);
	MIBADD(dot3StatsFCSErrors);
	MIBADD(dot3StatsInternalMacReceiveErrors);
	NPEADD(RxOverrunDiscards);
	NPEADD(RxLearnedEntryDiscards);
	NPEADD(RxLargeFramesDiscards);
	NPEADD(RxSTPBlockedDiscards);
	NPEADD(RxVLANTypeFilterDiscards);
	NPEADD(RxVLANIdFilterDiscards);
	NPEADD(RxInvalidSourceDiscards);
	NPEADD(RxBlackListDiscards);
	NPEADD(RxWhiteListDiscards);
	NPEADD(RxUnderflowEntryDiscards);
	MIBADD(dot3StatsSingleCollisionFrames);
	MIBADD(dot3StatsMultipleCollisionFrames);
	MIBADD(dot3StatsDeferredTransmissions);
	MIBADD(dot3StatsLateCollisions);
	MIBADD(dot3StatsExcessiveCollisions);
	MIBADD(dot3StatsInternalMacTransmitErrors);
	MIBADD(dot3StatsCarrierSenseErrors);
	NPEADD(TxLargeFrameDiscards);
	NPEADD(TxVLANIdFilterDiscards);

	sc->mibdata.dot3StatsFrameTooLongs +=
	      be32toh(ns->RxLargeFramesDiscards)
	    + be32toh(ns->TxLargeFrameDiscards);
	sc->mibdata.dot3StatsMissedFrames +=
	      be32toh(ns->RxOverrunDiscards)
	    + be32toh(ns->RxUnderflowEntryDiscards);

	if_inc_counter(ifp, IFCOUNTER_OERRORS,
	    be32toh(ns->dot3StatsInternalMacTransmitErrors) +
	    be32toh(ns->dot3StatsCarrierSenseErrors) +
	    be32toh(ns->TxVLANIdFilterDiscards));
	if_inc_counter(ifp, IFCOUNTER_IERRORS,
	    be32toh(ns->dot3StatsFCSErrors) +
	    be32toh(ns->dot3StatsInternalMacReceiveErrors) +
	    be32toh(ns->RxOverrunDiscards) +
	    be32toh(ns->RxUnderflowEntryDiscards));
	if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
	    be32toh(ns->dot3StatsSingleCollisionFrames) +
	    be32toh(ns->dot3StatsMultipleCollisionFrames));
#undef NPEADD
#undef MIBADD
}

static void
npe_tick(void *xsc)
{
#define	ACK	(NPE_RESETSTATS << NPE_MAC_MSGID_SHL)
	struct npe_softc *sc = xsc;
	struct mii_data *mii = device_get_softc(sc->sc_mii);
	uint32_t msg[2];

	NPE_ASSERT_LOCKED(sc);

	/*
	 * NB: to avoid sleeping with the softc lock held we
	 * split the NPE msg processing into two parts.  The
	 * request for statistics is sent w/o waiting for a
	 * reply and then on the next tick we retrieve the
	 * results.  This works because npe_tick is the only
	 * code that talks via the mailboxes (except at setup).
	 * This likely can be handled better.
	 */
	if (ixpnpe_recvmsg_async(sc->sc_npe, msg) == 0 && msg[0] == ACK) {
		bus_dmamap_sync(sc->sc_stats_tag, sc->sc_stats_map,
		    BUS_DMASYNC_POSTREAD);
		npe_addstats(sc);
	}
	npe_updatestats(sc);
	mii_tick(mii);

	npewatchdog(sc);

	/* schedule next poll */
	callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc);
#undef ACK
}

static void
npe_setmac(struct npe_softc *sc, u_char *eaddr)
{
	WR4(sc, NPE_MAC_UNI_ADDR_1, eaddr[0]);
	WR4(sc, NPE_MAC_UNI_ADDR_2, eaddr[1]);
	WR4(sc, NPE_MAC_UNI_ADDR_3, eaddr[2]);
	WR4(sc, NPE_MAC_UNI_ADDR_4, eaddr[3]);
	WR4(sc, NPE_MAC_UNI_ADDR_5, eaddr[4]);
	WR4(sc, NPE_MAC_UNI_ADDR_6, eaddr[5]);
}

static void
npe_getmac(struct npe_softc *sc, u_char *eaddr)
{
	/* NB: the unicast address appears to be loaded from EEPROM on reset */
	eaddr[0] = RD4(sc, NPE_MAC_UNI_ADDR_1) & 0xff;
	eaddr[1] = RD4(sc, NPE_MAC_UNI_ADDR_2) & 0xff;
	eaddr[2] = RD4(sc, NPE_MAC_UNI_ADDR_3) & 0xff;
	eaddr[3] = RD4(sc, NPE_MAC_UNI_ADDR_4) & 0xff;
	eaddr[4] = RD4(sc, NPE_MAC_UNI_ADDR_5) & 0xff;
	eaddr[5] = RD4(sc, NPE_MAC_UNI_ADDR_6) & 0xff;
}

struct txdone {
	struct npebuf *head;
	struct npebuf **tail;
	int count;
};

static __inline void
npe_txdone_finish(struct npe_softc *sc, const struct txdone *td)
{
	struct ifnet *ifp = sc->sc_ifp;

	NPE_LOCK(sc);
	*td->tail = sc->tx_free;
	sc->tx_free = td->head;
	/*
	 * We're no longer busy, so clear the busy flag and call the
	 * start routine to xmit more packets.
	 */
	if_inc_counter(ifp, IFCOUNTER_OPACKETS, td->count);
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->npe_watchdog_timer = 0;
	npestart_locked(ifp);
	NPE_UNLOCK(sc);
}

/*
 * Q manager callback on tx done queue.  Reap mbufs
 * and return tx buffers to the free list.  Finally
 * restart output.  Note the microcode has only one
 * txdone q wired into it so we must use the NPE ID
 * returned with each npehwbuf to decide where to
 * send buffers.
 */
static void
npe_txdone(int qid, void *arg)
{
#define	P2V(a, dma) \
	&(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
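	/*
	 * NB: P2V maps the physical address carried in a queue entry
	 * back to the corresponding s/w npebuf; buf[] and hwbuf[]
	 * are parallel (1-1) arrays.
	 */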
	struct npe_softc *sc0 = arg;
	struct npe_softc *sc;
	struct npebuf *npe;
	struct txdone *td, q[NPE_MAX];
	uint32_t entry;

	q[NPE_A].tail = &q[NPE_A].head; q[NPE_A].count = 0;
	q[NPE_B].tail = &q[NPE_B].head; q[NPE_B].count = 0;
	q[NPE_C].tail = &q[NPE_C].head; q[NPE_C].count = 0;
	/* XXX max # at a time? */
	while (ixpqmgr_qread(qid, &entry) == 0) {
		DPRINTF(sc0, "%s: entry 0x%x NPE %u port %u\n",
		    __func__, entry, NPE_QM_Q_NPE(entry), NPE_QM_Q_PORT(entry));

		sc = npes[NPE_QM_Q_NPE(entry)];
		npe = P2V(NPE_QM_Q_ADDR(entry), &sc->txdma);
		m_freem(npe->ix_m);
		npe->ix_m = NULL;

		td = &q[NPE_QM_Q_NPE(entry)];
		*td->tail = npe;
		td->tail = &npe->ix_next;
		td->count++;
	}

	if (q[NPE_A].count)
		npe_txdone_finish(npes[NPE_A], &q[NPE_A]);
	if (q[NPE_B].count)
		npe_txdone_finish(npes[NPE_B], &q[NPE_B]);
	if (q[NPE_C].count)
		npe_txdone_finish(npes[NPE_C], &q[NPE_C]);
#undef P2V
}

static int
npe_rxbuf_init(struct npe_softc *sc, struct npebuf *npe, struct mbuf *m)
{
	bus_dma_segment_t segs[1];
	struct npedma *dma = &sc->rxdma;
	struct npehwbuf *hw;
	int error, nseg;

	if (m == NULL) {
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			return ENOBUFS;
	}
	KASSERT(m->m_ext.ext_size >= 1536 + ETHER_ALIGN,
		("ext_size %d", m->m_ext.ext_size));
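	/*
	 * NB: 1536 is a multiple of 64 (per the NPE length requirement
	 * noted below) and large enough for a maximum-size Ethernet
	 * frame.
	 */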
	m->m_pkthdr.len = m->m_len = 1536;
	/* backload payload and align ip hdr */
	m->m_data = m->m_ext.ext_buf + (m->m_ext.ext_size - (1536+ETHER_ALIGN));
	bus_dmamap_unload(dma->mtag, npe->ix_map);
	error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map, m,
			segs, &nseg, 0);
	if (error != 0) {
		m_freem(m);
		return error;
	}
	hw = npe->ix_hw;
	hw->ix_ne[0].data = htobe32(segs[0].ds_addr);
	/* NB: NPE requires length be a multiple of 64 */
	/* NB: buffer length is shifted in word */
	hw->ix_ne[0].len = htobe32(segs[0].ds_len << 16);
	hw->ix_ne[0].next = 0;
	bus_dmamap_sync(dma->buf_tag, dma->buf_map,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	npe->ix_m = m;
	/* Flush the memory in the mbuf */
	bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREREAD);
	return 0;
}

/*
 * RX q processing for a specific NPE.  Claim entries
 * from the hardware queue and pass the frames up the
 * stack. Pass the rx buffers to the free list.
 */
static int
npe_rxdone(int qid, void *arg)
{
#define	P2V(a, dma) \
	&(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
	struct npe_softc *sc = arg;
	struct npedma *dma = &sc->rxdma;
	uint32_t entry;
	int rx_npkts = 0;

	while (ixpqmgr_qread(qid, &entry) == 0) {
		struct npebuf *npe = P2V(NPE_QM_Q_ADDR(entry), dma);
		struct mbuf *m;

		bus_dmamap_sync(dma->buf_tag, dma->buf_map,
		    BUS_DMASYNC_POSTREAD);
		DPRINTF(sc, "%s: entry 0x%x neaddr 0x%x ne_len 0x%x\n",
		    __func__, entry, npe->ix_neaddr, npe->ix_hw->ix_ne[0].len);
		/*
		 * Allocate a new mbuf to replenish the rx buffer.
		 * If doing so fails we drop the rx'd frame so we
		 * can reuse the previous mbuf.  When we're able to
		 * allocate a new mbuf dispatch the mbuf w/ rx'd
		 * data up the stack and replace it with the newly
		 * allocated one.
		 */
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m != NULL) {
			struct mbuf *mrx = npe->ix_m;
			struct npehwbuf *hw = npe->ix_hw;
			struct ifnet *ifp = sc->sc_ifp;

			/* Flush mbuf memory for rx'd data */
			bus_dmamap_sync(dma->mtag, npe->ix_map,
			    BUS_DMASYNC_POSTREAD);

			/* set m_len etc. per rx frame size */
			mrx->m_len = be32toh(hw->ix_ne[0].len) & 0xffff;
			mrx->m_pkthdr.len = mrx->m_len;
			mrx->m_pkthdr.rcvif = ifp;

			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
			ifp->if_input(ifp, mrx);
			rx_npkts++;
		} else {
			/* discard frame and re-use mbuf */
			m = npe->ix_m;
		}
		if (npe_rxbuf_init(sc, npe, m) == 0) {
			/* return npe buf to rx free list */
			ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
		} else {
			/* XXX should not happen */
		}
	}
	return rx_npkts;
#undef P2V
}

#ifdef DEVICE_POLLING
static int
npe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct npe_softc *sc = ifp->if_softc;
	int rx_npkts = 0;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		rx_npkts = npe_rxdone(sc->rx_qid, sc);
		npe_txdone(sc->tx_doneqid, sc);	/* XXX polls both NPE's */
	}
	return rx_npkts;
}
#endif /* DEVICE_POLLING */

static void
npe_startxmit(struct npe_softc *sc)
{
	struct npedma *dma = &sc->txdma;
	int i;

	NPE_ASSERT_LOCKED(sc);
	sc->tx_free = NULL;
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];
		if (npe->ix_m != NULL) {
			/* NB: should not happen */
			device_printf(sc->sc_dev,
			    "%s: free mbuf at entry %u\n", __func__, i);
			m_freem(npe->ix_m);
		}
		npe->ix_m = NULL;
		npe->ix_next = sc->tx_free;
		sc->tx_free = npe;
	}
}

static void
npe_startrecv(struct npe_softc *sc)
{
	struct npedma *dma = &sc->rxdma;
	struct npebuf *npe;
	int i;

	NPE_ASSERT_LOCKED(sc);
	for (i = 0; i < dma->nbuf; i++) {
		npe = &dma->buf[i];
		npe_rxbuf_init(sc, npe, npe->ix_m);
		/* set npe buf on rx free list */
		ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
	}
}

/*
 * Reset and initialize the chip.
 */
static void
npeinit_locked(void *xsc)
{
	struct npe_softc *sc = xsc;
	struct ifnet *ifp = sc->sc_ifp;

	NPE_ASSERT_LOCKED(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;				/* XXX */

	/*
	 * Reset MAC core.
	 */
	npe_mac_reset(sc);

	/* disable transmitter and receiver in the MAC */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);

	/*
	 * Set the MAC core registers.
	 */
	WR4(sc, NPE_MAC_INT_CLK_THRESH, 0x1);	/* clock ratio: for ixp4xx */
	WR4(sc, NPE_MAC_TX_CNTRL2,	0xf);	/* max retries */
	WR4(sc, NPE_MAC_RANDOM_SEED,	0x8);	/* LFSR back-off seed */
	/* thresholds determined by NPE firmware FS */
	WR4(sc, NPE_MAC_THRESH_P_EMPTY,	0x12);
	WR4(sc, NPE_MAC_THRESH_P_FULL,	0x30);
	WR4(sc, NPE_MAC_BUF_SIZE_TX,	0x8);	/* tx fifo threshold (bytes) */
	WR4(sc, NPE_MAC_TX_DEFER,	0x15);	/* for single deferral */
	WR4(sc, NPE_MAC_RX_DEFER,	0x16);	/* deferral on inter-frame gap */
	WR4(sc, NPE_MAC_TX_TWO_DEFER_1,	0x8);	/* for 2-part deferral */
	WR4(sc, NPE_MAC_TX_TWO_DEFER_2,	0x7);	/* for 2-part deferral */
	WR4(sc, NPE_MAC_SLOT_TIME,	0x80);	/* assumes MII mode */

	WR4(sc, NPE_MAC_TX_CNTRL1,
		  NPE_TX_CNTRL1_RETRY		/* retry failed xmits */
		| NPE_TX_CNTRL1_FCS_EN		/* append FCS */
		| NPE_TX_CNTRL1_2DEFER		/* 2-part deferral */
		| NPE_TX_CNTRL1_PAD_EN);	/* pad runt frames */
	/* XXX pad strip? */
	/* enable pause frame handling */
	WR4(sc, NPE_MAC_RX_CNTRL1, NPE_RX_CNTRL1_PAUSE_EN);
	WR4(sc, NPE_MAC_RX_CNTRL2, 0);

	npe_setmac(sc, IF_LLADDR(ifp));
	npe_setportaddress(sc, IF_LLADDR(ifp));
	npe_setmcast(sc);

	npe_startxmit(sc);
	npe_startrecv(sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->npe_watchdog_timer = 0;		/* just in case */

	/* enable transmitter and receiver in the MAC */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) | NPE_RX_CNTRL1_RX_EN);
	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) | NPE_TX_CNTRL1_TX_EN);

	callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc);
}

static void
npeinit(void *xsc)
{
	struct npe_softc *sc = xsc;
	NPE_LOCK(sc);
	npeinit_locked(sc);
	NPE_UNLOCK(sc);
}

/*
 * Dequeue packets and place on the h/w transmit queue.
 */
static void
npestart_locked(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;
	struct npebuf *npe;
	struct npehwbuf *hw;
	struct mbuf *m, *n;
	struct npedma *dma = &sc->txdma;
	bus_dma_segment_t segs[NPE_MAXSEG];
	int nseg, len, error, i;
	uint32_t next;

	NPE_ASSERT_LOCKED(sc);
	/* XXX can this happen? */
	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	while (sc->tx_free != NULL) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			/* XXX? */
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
			return;
		}
		npe = sc->tx_free;
		bus_dmamap_unload(dma->mtag, npe->ix_map);
		error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map,
		    m, segs, &nseg, 0);
		if (error == EFBIG) {
			n = m_collapse(m, M_NOWAIT, NPE_MAXSEG);
			if (n == NULL) {
				if_printf(ifp, "%s: too many fragments %u\n",
				    __func__, nseg);
				m_freem(m);
				return;	/* XXX? */
			}
			m = n;
			error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map,
			    m, segs, &nseg, 0);
		}
		if (error != 0 || nseg == 0) {
			if_printf(ifp, "%s: error %u nseg %u\n",
			    __func__, error, nseg);
			m_freem(m);
			return;	/* XXX? */
		}
		sc->tx_free = npe->ix_next;

		bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREWRITE);

		/*
		 * Tap off here if there is a bpf listener.
		 */
		BPF_MTAP(ifp, m);

		npe->ix_m = m;
		hw = npe->ix_hw;
		len = m->m_pkthdr.len;
		next = npe->ix_neaddr + sizeof(hw->ix_ne[0]);
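		/*
		 * Build the h/w descriptor chain: each ix_ne entry holds
		 * the segment address, the segment length in the high 16
		 * bits of len (the total frame length rides in the low 16
		 * bits of the first entry only), and a link to the next
		 * entry.
		 */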
		for (i = 0; i < nseg; i++) {
			hw->ix_ne[i].data = htobe32(segs[i].ds_addr);
			hw->ix_ne[i].len = htobe32((segs[i].ds_len<<16) | len);
			hw->ix_ne[i].next = htobe32(next);

			len = 0;		/* zero for segments > 1 */
			next += sizeof(hw->ix_ne[0]);
		}
		hw->ix_ne[i-1].next = 0;	/* zero last in chain */
		bus_dmamap_sync(dma->buf_tag, dma->buf_map,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		DPRINTF(sc, "%s: qwrite(%u, 0x%x) ne_data %x ne_len 0x%x\n",
		    __func__, sc->tx_qid, npe->ix_neaddr,
		    hw->ix_ne[0].data, hw->ix_ne[0].len);
		/* stick it on the tx q */
		/* XXX add vlan priority */
		ixpqmgr_qwrite(sc->tx_qid, npe->ix_neaddr);

		sc->npe_watchdog_timer = 5;
	}
	if (sc->tx_free == NULL)
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
}

static void
npestart(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;
	NPE_LOCK(sc);
	npestart_locked(ifp);
	NPE_UNLOCK(sc);
}

static void
npe_stopxmit(struct npe_softc *sc)
{
	struct npedma *dma = &sc->txdma;
	int i;

	NPE_ASSERT_LOCKED(sc);

	/* XXX qmgr */
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];

		if (npe->ix_m != NULL) {
			bus_dmamap_unload(dma->mtag, npe->ix_map);
			m_freem(npe->ix_m);
			npe->ix_m = NULL;
		}
	}
}

static void
npe_stoprecv(struct npe_softc *sc)
{
	struct npedma *dma = &sc->rxdma;
	int i;

	NPE_ASSERT_LOCKED(sc);

	/* XXX qmgr */
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];

		if (npe->ix_m != NULL) {
			bus_dmamap_unload(dma->mtag, npe->ix_map);
			m_freem(npe->ix_m);
			npe->ix_m = NULL;
		}
	}
}

/*
 * Turn off interrupts and stop the nic.
 */
static void
npestop(struct npe_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* disable transmitter and receiver in the MAC */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);

	sc->npe_watchdog_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	callout_stop(&sc->tick_ch);

	npe_stopxmit(sc);
	npe_stoprecv(sc);
	/* XXX go into loopback & drain q's? */
	/* XXX but beware of disabling tx above */

	/*
	 * The MAC core rx/tx disable may leave the MAC hardware in an
	 * unpredictable state. A hw reset is executed before resetting
	 * all the MAC parameters to a known value.
	 */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
	DELAY(NPE_MAC_RESET_DELAY);
	WR4(sc, NPE_MAC_INT_CLK_THRESH, NPE_MAC_INT_CLK_THRESH_DEFAULT);
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);
}

static void
npewatchdog(struct npe_softc *sc)
{
	NPE_ASSERT_LOCKED(sc);

	if (sc->npe_watchdog_timer == 0 || --sc->npe_watchdog_timer != 0)
		return;

	device_printf(sc->sc_dev, "watchdog timeout\n");
	if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);

	npeinit_locked(sc);
}

static int
npeioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct npe_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0;
#ifdef DEVICE_POLLING
	int mask;
#endif

	switch (cmd) {
	case SIOCSIFFLAGS:
		NPE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			npestop(sc);
		} else {
			/* reinitialize card on any parameter change */
			npeinit_locked(sc);
		}
		NPE_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* update multicast filter list. */
		NPE_LOCK(sc);
		npe_setmcast(sc);
		NPE_UNLOCK(sc);
		error = 0;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_mii);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

#ifdef DEVICE_POLLING
	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(npe_poll, ifp);
				if (error)
					return error;
				NPE_LOCK(sc);
				/* disable callbacks XXX txdone is shared */
				ixpqmgr_notify_disable(sc->rx_qid);
				ixpqmgr_notify_disable(sc->tx_doneqid);
				ifp->if_capenable |= IFCAP_POLLING;
				NPE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* NB: always enable qmgr callbacks */
				NPE_LOCK(sc);
				/* enable qmgr callbacks */
				ixpqmgr_notify_enable(sc->rx_qid,
				    IX_QMGR_Q_SOURCE_ID_NOT_E);
				ixpqmgr_notify_enable(sc->tx_doneqid,
				    IX_QMGR_Q_SOURCE_ID_NOT_E);
				ifp->if_capenable &= ~IFCAP_POLLING;
				NPE_UNLOCK(sc);
			}
		}
		break;
#endif
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}

/*
 * Set up a traffic class -> rx queue mapping.
 */
static int
npe_setrxqosentry(struct npe_softc *sc, int classix, int trafclass, int qid)
{
	uint32_t msg[2];

	msg[0] = (NPE_SETRXQOSENTRY << 24) | (sc->sc_npeid << 20) | classix;
	msg[1] = (trafclass << 24) | (1 << 23) | (qid << 16) | (qid << 4);
	return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
}

static int
npe_setportaddress(struct npe_softc *sc, const uint8_t mac[ETHER_ADDR_LEN])
{
	uint32_t msg[2];

	msg[0] = (NPE_SETPORTADDRESS << 24)
	       | (sc->sc_npeid << 20)
	       | (mac[0] << 8)
	       | (mac[1] << 0);
	msg[1] = (mac[2] << 24)
	       | (mac[3] << 16)
	       | (mac[4] << 8)
	       | (mac[5] << 0);
	return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
}

static int
npe_setfirewallmode(struct npe_softc *sc, int onoff)
{
	uint32_t msg[2];

	/* XXX honor onoff */
	msg[0] = (NPE_SETFIREWALLMODE << 24) | (sc->sc_npeid << 20);
	msg[1] = 0;
	return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
}

/*
 * Update and reset the statistics in the NPE.
 */
static int
npe_updatestats(struct npe_softc *sc)
{
	uint32_t msg[2];

	msg[0] = NPE_RESETSTATS << NPE_MAC_MSGID_SHL;
	msg[1] = sc->sc_stats_phys;	/* physical address of stat block */
	return ixpnpe_sendmsg_async(sc->sc_npe, msg);
}

#if 0
/*
 * Get the current statistics block.
 */
static int
npe_getstats(struct npe_softc *sc)
{
	uint32_t msg[2];

	msg[0] = NPE_GETSTATS << NPE_MAC_MSGID_SHL;
	msg[1] = sc->sc_stats_phys;	/* physical address of stat block */
	return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
}

/*
 * Query the image id of the loaded firmware.
 */
static uint32_t
npe_getimageid(struct npe_softc *sc)
{
	uint32_t msg[2];

	msg[0] = NPE_GETSTATUS << NPE_MAC_MSGID_SHL;
	msg[1] = 0;
	return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg) == 0 ?
	    msg[1] : 0;
}

/*
 * Enable/disable loopback.
 */
static int
npe_setloopback(struct npe_softc *sc, int ena)
{
	uint32_t msg[2];

	msg[0] = (NPE_SETLOOPBACK << NPE_MAC_MSGID_SHL) | (ena != 0);
	msg[1] = 0;
	return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
}
#endif

static void
npe_child_detached(device_t dev, device_t child)
{
	struct npe_softc *sc;

	sc = device_get_softc(dev);
	if (child == sc->sc_mii)
		sc->sc_mii = NULL;
}

/*
 * MII bus support routines.
 */
#define	MII_RD4(sc, reg)	bus_space_read_4(sc->sc_iot, sc->sc_miih, reg)
#define	MII_WR4(sc, reg, v) \
	bus_space_write_4(sc->sc_iot, sc->sc_miih, reg, v)

static uint32_t
npe_mii_mdio_read(struct npe_softc *sc, int reg)
{
	uint32_t v;

	/* NB: registers are known to be sequential */
	v =  (MII_RD4(sc, reg+0) & 0xff) << 0;
	v |= (MII_RD4(sc, reg+4) & 0xff) << 8;
	v |= (MII_RD4(sc, reg+8) & 0xff) << 16;
	v |= (MII_RD4(sc, reg+12) & 0xff) << 24;
	return v;
}

static void
npe_mii_mdio_write(struct npe_softc *sc, int reg, uint32_t cmd)
{
	/* NB: registers are known to be sequential */
	MII_WR4(sc, reg+0, cmd & 0xff);
	MII_WR4(sc, reg+4, (cmd >> 8) & 0xff);
	MII_WR4(sc, reg+8, (cmd >> 16) & 0xff);
	MII_WR4(sc, reg+12, (cmd >> 24) & 0xff);
}

static int
npe_mii_mdio_wait(struct npe_softc *sc)
{
	uint32_t v;
	int i;

	/* NB: typically this takes 25-30 trips */
	for (i = 0; i < 1000; i++) {
		v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_CMD);
		if ((v & NPE_MII_GO) == 0)
			return 1;
		DELAY(1);
	}
	device_printf(sc->sc_dev, "%s: timeout after ~1ms, cmd 0x%x\n",
	    __func__, v);
	return 0;		/* NB: timeout */
}

static int
npe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct npe_softc *sc = device_get_softc(dev);
	uint32_t v;

	v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL) | NPE_MII_GO;
	npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
	if (npe_mii_mdio_wait(sc))
		v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_STS);
	else
		v = 0xffff | NPE_MII_READ_FAIL;
	return (v & NPE_MII_READ_FAIL) ? 0xffff : (v & 0xffff);
}

static int
npe_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct npe_softc *sc = device_get_softc(dev);
	uint32_t v;

	v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL)
	  | data | NPE_MII_WRITE
	  | NPE_MII_GO;
	npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
	/* XXX complain about timeout */
	(void) npe_mii_mdio_wait(sc);
	return (0);
}

static void
npe_miibus_statchg(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->sc_mii);
	uint32_t tx1, rx1;

	/* sync MAC duplex state */
	tx1 = RD4(sc, NPE_MAC_TX_CNTRL1);
	rx1 = RD4(sc, NPE_MAC_RX_CNTRL1);
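	/*
	 * NB: as programmed here, NPE_TX_CNTRL1_DUPLEX selects
	 * half-duplex operation, so it is cleared (and pause frame
	 * handling enabled) for full duplex.
	 */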
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		tx1 &= ~NPE_TX_CNTRL1_DUPLEX;
		rx1 |= NPE_RX_CNTRL1_PAUSE_EN;
	} else {
		tx1 |= NPE_TX_CNTRL1_DUPLEX;
		rx1 &= ~NPE_RX_CNTRL1_PAUSE_EN;
	}
	WR4(sc, NPE_MAC_RX_CNTRL1, rx1);
	WR4(sc, NPE_MAC_TX_CNTRL1, tx1);
}

static device_method_t npe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		npe_probe),
	DEVMETHOD(device_attach,	npe_attach),
	DEVMETHOD(device_detach,	npe_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_detached,	npe_child_detached),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	npe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	npe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	npe_miibus_statchg),

	{ 0, 0 }
};

static driver_t npe_driver = {
	"npe",
	npe_methods,
	sizeof(struct npe_softc),
};

DRIVER_MODULE(npe, ixp, npe_driver, npe_devclass, 0, 0);
DRIVER_MODULE(miibus, npe, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(npe, ixpqmgr, 1, 1, 1);
MODULE_DEPEND(npe, miibus, 1, 1, 1);
MODULE_DEPEND(npe, ether, 1, 1, 1);