/*-
 * Copyright (c) 2006-2008 Sam Leffler.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/xscale/ixp425/if_npe.c 207554 2010-05-03 07:32:50Z sobomax $");

/*
 * Intel XScale NPE Ethernet driver.
 *
 * This driver handles the two ports present on the IXP425.
 * Packet processing is done by the Network Processing Engines
 * (NPE's) that work together with a MAC and PHY. The MAC
 * is also mapped to the XScale cpu; the PHY is accessed via
 * the MAC. NPE-XScale communication happens through h/w
 * queues managed by the Q Manager block.
 *
 * The code here replaces the ethAcc, ethMii, and ethDB classes
 * in the Intel Access Library (IAL) and the OS-specific driver.
 *
 * XXX add vlan support
 */
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <machine/bus.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_mib.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <arm/xscale/ixp425/ixp425reg.h>
#include <arm/xscale/ixp425/ixp425var.h>
#include <arm/xscale/ixp425/ixp425_qmgr.h>
#include <arm/xscale/ixp425/ixp425_npevar.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <arm/xscale/ixp425/if_npereg.h>

#include <machine/armreg.h>

#include "miibus_if.h"

/*
 * XXX: For the main bus dma tag. Can go away if the new method to get the
 * dma tag from the parent got MFC'd into RELENG_6.
 */
extern struct ixp425_softc *ixp425_softc;

struct npebuf {
	struct npebuf *ix_next;		/* chain to next buffer */
	void *ix_m;			/* backpointer to mbuf */
	bus_dmamap_t ix_map;		/* bus dma map for associated data */
	struct npehwbuf *ix_hw;		/* associated h/w block */
	uint32_t ix_neaddr;		/* phys address of ix_hw */
};

struct npedma {
	const char* name;
	int nbuf;			/* # npebuf's allocated */
	bus_dma_tag_t mtag;		/* bus dma tag for mbuf data */
	struct npehwbuf *hwbuf;		/* NPE h/w buffers */
	bus_dma_tag_t buf_tag;		/* tag+map for NPE buffers */
	bus_dmamap_t buf_map;
	bus_addr_t buf_phys;		/* phys addr of buffers */
	struct npebuf *buf;		/* s/w buffers (1-1 w/ h/w) */
};

struct npe_softc {
	/* XXX mii requires this be first; do not move! */
	struct ifnet *sc_ifp;		/* ifnet pointer */
	struct mtx sc_mtx;		/* basically a perimeter lock */
	device_t sc_dev;
	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;	/* MAC register window */
	device_t sc_mii;		/* child miibus */
	bus_space_handle_t sc_miih;	/* MII register window */
	int sc_npeid;
	struct ixpnpe_softc *sc_npe;	/* NPE support */
	int sc_debug;			/* DPRINTF* control */
	int sc_tickinterval;
	struct callout tick_ch;		/* Tick callout */
	int npe_watchdog_timer;
	struct npedma txdma;
	struct npebuf *tx_free;		/* list of free tx buffers */
	struct npedma rxdma;
	bus_addr_t buf_phys;		/* XXX for returning a value */
	int rx_qid;			/* rx qid */
	int rx_freeqid;			/* rx free buffers qid */
	int tx_qid;			/* tx qid */
	int tx_doneqid;			/* tx completed qid */
	int sc_phy;			/* PHY id */
	struct ifmib_iso_8802_3 mibdata;
	bus_dma_tag_t sc_stats_tag;	/* bus dma tag for stats block */
	struct npestats *sc_stats;
	bus_dmamap_t sc_stats_map;
	bus_addr_t sc_stats_phys;	/* phys addr of sc_stats */
	struct npestats sc_totals;	/* accumulated sc_stats */
};

/*
 * Static configuration for IXP425/IXP435. The tx and
 * rx free Q id's are fixed by the NPE microcode. The
 * rx Q id's are programmed to be separate to simplify
 * multi-port processing. It may be better to handle
 * all traffic through one Q (as done by the Intel drivers).
 *
 * Note that the PHY's are accessible only from MAC B on the
 * IXP425 and from MAC C on other devices. This and other
 * platform-specific assumptions are handled with hints.
 */
static const struct {
	uint32_t macbase;
	uint32_t miibase;
	int phy;			/* phy id */
	uint8_t rx_qid;
	uint8_t rx_freeqid;
	uint8_t tx_qid;
	uint8_t tx_doneqid;
} npeconfig[NPE_MAX] = {
	[NPE_A] = {
		.macbase	= IXP435_MAC_A_HWBASE,
		.miibase	= IXP425_MAC_C_HWBASE,
		.phy		= 2,
		.rx_qid		= 4,
		.rx_freeqid	= 26,
		.tx_qid		= 23,
		.tx_doneqid	= 31
	},
	[NPE_B] = {
		.macbase	= IXP425_MAC_B_HWBASE,
		.miibase	= IXP425_MAC_B_HWBASE,
		.phy		= 0,
		.rx_qid		= 4,
		.rx_freeqid	= 27,
		.tx_qid		= 24,
		.tx_doneqid	= 31
	},
	[NPE_C] = {
		.macbase	= IXP425_MAC_C_HWBASE,
		.miibase	= IXP425_MAC_B_HWBASE,
		.phy		= 1,
		.rx_qid		= 12,
		.rx_freeqid	= 28,
		.tx_qid		= 25,
		.tx_doneqid	= 31
	},
};
static struct npe_softc *npes[NPE_MAX];	/* NB: indexed by npeid */

static __inline uint32_t
RD4(struct npe_softc *sc, bus_size_t off)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
}

static __inline void
WR4(struct npe_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
}

#define	NPE_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define	NPE_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define	NPE_LOCK_INIT(_sc) \
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->sc_dev), \
	    MTX_NETWORK_LOCK, MTX_DEF)
#define	NPE_LOCK_DESTROY(_sc)	mtx_destroy(&_sc->sc_mtx);
#define	NPE_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define	NPE_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);

static devclass_t npe_devclass;

static int	override_npeid(device_t, const char *resname, int *val);
static int	npe_activate(device_t dev);
static void	npe_deactivate(device_t dev);
static int	npe_ifmedia_update(struct ifnet *ifp);
static void	npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static void	npe_setmac(struct npe_softc *sc, u_char *eaddr);
static void	npe_getmac(struct npe_softc *sc, u_char *eaddr);
static void	npe_txdone(int qid, void *arg);
static int	npe_rxbuf_init(struct npe_softc *, struct npebuf *,
			struct mbuf *);
static int	npe_rxdone(int qid, void *arg);
static void	npeinit(void *);
static void	npestart_locked(struct ifnet *);
static void	npestart(struct ifnet *);
static void	npestop(struct npe_softc *);
static void	npewatchdog(struct npe_softc *);
static int	npeioctl(struct ifnet * ifp, u_long, caddr_t);

static int	npe_setrxqosentry(struct npe_softc *, int classix,
			int trafclass, int qid);
static int	npe_setportaddress(struct npe_softc *, const uint8_t mac[]);
static int	npe_setfirewallmode(struct npe_softc *, int onoff);
static int	npe_updatestats(struct npe_softc *);
#if 0
static int	npe_getstats(struct npe_softc *);
static uint32_t	npe_getimageid(struct npe_softc *);
static int	npe_setloopback(struct npe_softc *, int ena);
#endif

/* NB: all tx done processing goes through one queue */
static int tx_doneqid = -1;

SYSCTL_NODE(_hw, OID_AUTO, npe, CTLFLAG_RD, 0, "IXP4XX NPE driver parameters");

static int npe_debug = 0;
SYSCTL_INT(_hw_npe, OID_AUTO, debug, CTLFLAG_RW, &npe_debug,
    0, "IXP4XX NPE network interface debug msgs");
TUNABLE_INT("hw.npe.debug", &npe_debug);
#define	DPRINTF(sc, fmt, ...) do {					\
	if (sc->sc_debug) device_printf(sc->sc_dev, fmt, __VA_ARGS__);	\
} while (0)
#define	DPRINTFn(n, sc, fmt, ...) do {					\
	if (sc->sc_debug >= n) device_printf(sc->sc_dev, fmt, __VA_ARGS__);\
} while (0)
static int npe_tickinterval = 3;	/* npe_tick frequency (secs) */
SYSCTL_INT(_hw_npe, OID_AUTO, tickinterval, CTLFLAG_RD, &npe_tickinterval,
    0, "periodic work interval (secs)");
TUNABLE_INT("hw.npe.tickinterval", &npe_tickinterval);

static int npe_rxbuf = 64;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_npe, OID_AUTO, rxbuf, CTLFLAG_RD, &npe_rxbuf,
    0, "rx buffers allocated");
TUNABLE_INT("hw.npe.rxbuf", &npe_rxbuf);
static int npe_txbuf = 128;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_npe, OID_AUTO, txbuf, CTLFLAG_RD, &npe_txbuf,
    0, "tx buffers allocated");
TUNABLE_INT("hw.npe.txbuf", &npe_txbuf);

static int
unit2npeid(int unit)
{
	static const int npeidmap[2][3] = {
		/* on 425 A is for HSS, B & C are for Ethernet */
		{ NPE_B, NPE_C, -1 },	/* IXP425 */
		/* 435 only has A & C, order C then A */
		{ NPE_C, NPE_A, -1 },	/* IXP435 */
	};
	/* XXX check feature register instead */
	return (unit < 3 ? npeidmap[
	    (cpu_id() & CPU_ID_CPU_MASK) == CPU_ID_IXP435][unit] : -1);
}
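
/*
 * NB: given the table above, unit 0 attaches as NPE_B and unit 1 as
 * NPE_C on the IXP425, while unit 0 attaches as NPE_C and unit 1 as
 * NPE_A on the IXP435.
 */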

static int
npe_probe(device_t dev)
{
	static const char *desc[NPE_MAX] = {
		[NPE_A] = "IXP NPE-A",
		[NPE_B] = "IXP NPE-B",
		[NPE_C] = "IXP NPE-C"
	};
	int unit = device_get_unit(dev);
	int npeid;

	if (unit > 2 ||
	    (ixp4xx_read_feature_bits() &
	     (unit == 0 ? EXP_FCTRL_ETH0 : EXP_FCTRL_ETH1)) == 0)
		return EINVAL;

	npeid = -1;
	if (!override_npeid(dev, "npeid", &npeid))
		npeid = unit2npeid(unit);
	if (npeid == -1) {
		device_printf(dev, "unit %d not supported\n", unit);
		return EINVAL;
	}
	device_set_desc(dev, desc[npeid]);
	return 0;
}

static int
npe_attach(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	struct ixp425_softc *sa = device_get_softc(device_get_parent(dev));
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct ifnet *ifp;
	int error;
	u_char eaddr[6];

	sc->sc_dev = dev;
	sc->sc_iot = sa->sc_iot;
	NPE_LOCK_INIT(sc);
	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
	sc->sc_debug = npe_debug;
	sc->sc_tickinterval = npe_tickinterval;

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet\n");
		error = EIO;		/* XXX */
		goto out;
	}
	/* NB: must be setup prior to invoking mii code */
	sc->sc_ifp = ifp;

	error = npe_activate(dev);
	if (error) {
		device_printf(dev, "cannot activate npe\n");
		goto out;
	}

	npe_getmac(sc, eaddr);

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = npestart;
	ifp->if_ioctl = npeioctl;
	ifp->if_init = npeinit;
	IFQ_SET_MAXLEN(&ifp->if_snd, sc->txdma.nbuf - 1);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_linkmib = &sc->mibdata;
	ifp->if_linkmiblen = sizeof(sc->mibdata);
	sc->mibdata.dot3Compliance = DOT3COMPLIANCE_STATS;
	/* device supports oversized vlan frames */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tickinterval",
	    CTLFLAG_RW, &sc->sc_tickinterval, 0, "periodic work frequency");
	SYSCTL_ADD_STRUCT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "stats",
	    CTLFLAG_RD, &sc->sc_totals, npestats, "onboard stats");

	ether_ifattach(ifp, eaddr);
	return 0;
out:
	if (ifp != NULL)
		if_free(ifp);
	NPE_LOCK_DESTROY(sc);
	npe_deactivate(dev);
	return error;
}

static int
npe_detach(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = sc->sc_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	npestop(sc);
	if (ifp != NULL) {
		ether_ifdetach(ifp);
		if_free(ifp);
	}
	NPE_LOCK_DESTROY(sc);
	npe_deactivate(dev);
	return 0;
}

/*
 * Compute and install the multicast filter.
 */
static void
npe_setmcast(struct npe_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	uint8_t mask[ETHER_ADDR_LEN], addr[ETHER_ADDR_LEN];
	int i;

	if (ifp->if_flags & IFF_PROMISC) {
		memset(mask, 0, ETHER_ADDR_LEN);
		memset(addr, 0, ETHER_ADDR_LEN);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		static const uint8_t allmulti[ETHER_ADDR_LEN] =
		    { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
		memcpy(mask, allmulti, ETHER_ADDR_LEN);
		memcpy(addr, allmulti, ETHER_ADDR_LEN);
	} else {
		uint8_t clr[ETHER_ADDR_LEN], set[ETHER_ADDR_LEN];
		struct ifmultiaddr *ifma;
		const uint8_t *mac;

		memset(clr, 0, ETHER_ADDR_LEN);
		memset(set, 0xff, ETHER_ADDR_LEN);

		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			mac = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				clr[i] |= mac[i];
				set[i] &= mac[i];
			}
		}
		if_maddr_runlock(ifp);

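		/*
		 * NB: the h/w does a masked compare: mask gets a 1 in
		 * each bit position where all subscribed addresses
		 * agree (set holds the common 1 bits, ~clr the common
		 * 0 bits) and addr supplies the value expected in
		 * those positions.
		 */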
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			mask[i] = set[i] | ~clr[i];
			addr[i] = set[i];
		}
	}

	/*
	 * Write the mask and address registers.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		WR4(sc, NPE_MAC_ADDR_MASK(i), mask[i]);
		WR4(sc, NPE_MAC_ADDR(i), addr[i]);
	}
}

static void
npe_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct npe_softc *sc;

	if (error != 0)
		return;
	sc = (struct npe_softc *)arg;
	sc->buf_phys = segs[0].ds_addr;
}

static int
npe_dma_setup(struct npe_softc *sc, struct npedma *dma,
	const char *name, int nbuf, int maxseg)
{
	int error, i;

	memset(dma, 0, sizeof(*dma));

	dma->name = name;
	dma->nbuf = nbuf;

	/* DMA tag for mapped mbufs */
	error = bus_dma_tag_create(ixp425_softc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES, maxseg, MCLBYTES, 0,
	    busdma_lock_mutex, &sc->sc_mtx, &dma->mtag);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to create %s mbuf dma tag, "
		    "error %u\n", dma->name, error);
		return error;
	}

	/* DMA tag and map for the NPE buffers */
	error = bus_dma_tag_create(ixp425_softc->sc_dmat, sizeof(uint32_t), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    nbuf * sizeof(struct npehwbuf), 1,
	    nbuf * sizeof(struct npehwbuf), 0,
	    busdma_lock_mutex, &sc->sc_mtx, &dma->buf_tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to create %s npebuf dma tag, error %u\n",
		    dma->name, error);
		return error;
	}
	/* XXX COHERENT for now */
	error = bus_dmamem_alloc(dma->buf_tag, (void **)&dma->hwbuf,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &dma->buf_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to allocate memory for %s h/w buffers, error %u\n",
		    dma->name, error);
		return error;
	}
	/* XXX M_TEMP */
	dma->buf = malloc(nbuf * sizeof(struct npebuf), M_TEMP, M_NOWAIT | M_ZERO);
	if (dma->buf == NULL) {
		device_printf(sc->sc_dev,
		    "unable to allocate memory for %s s/w buffers\n",
		    dma->name);
		return ENOMEM;
	}
	error = bus_dmamap_load(dma->buf_tag, dma->buf_map,
	    dma->hwbuf, nbuf*sizeof(struct npehwbuf), npe_getaddr, sc, 0);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to map memory for %s h/w buffers, error %u\n",
		    dma->name, error);
		return error;
	}
	dma->buf_phys = sc->buf_phys;
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];
		struct npehwbuf *hw = &dma->hwbuf[i];

		/* calculate offset to shared area */
		npe->ix_neaddr = dma->buf_phys +
		    ((uintptr_t)hw - (uintptr_t)dma->hwbuf);
		KASSERT((npe->ix_neaddr & 0x1f) == 0,
		    ("ixpbuf misaligned, PA 0x%x", npe->ix_neaddr));
		error = bus_dmamap_create(dma->mtag, BUS_DMA_NOWAIT,
		    &npe->ix_map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "unable to create dmamap for %s buffer %u, "
			    "error %u\n", dma->name, i, error);
			return error;
		}
		npe->ix_hw = hw;
	}
	bus_dmamap_sync(dma->buf_tag, dma->buf_map, BUS_DMASYNC_PREWRITE);
	return 0;
}

static void
npe_dma_destroy(struct npe_softc *sc, struct npedma *dma)
{
	int i;

	if (dma->hwbuf != NULL) {
		for (i = 0; i < dma->nbuf; i++) {
			struct npebuf *npe = &dma->buf[i];
			bus_dmamap_destroy(dma->mtag, npe->ix_map);
		}
		bus_dmamap_unload(dma->buf_tag, dma->buf_map);
		bus_dmamem_free(dma->buf_tag, dma->hwbuf, dma->buf_map);
	}
	if (dma->buf != NULL)
		free(dma->buf, M_TEMP);
	if (dma->buf_tag)
		bus_dma_tag_destroy(dma->buf_tag);
	if (dma->mtag)
		bus_dma_tag_destroy(dma->mtag);
	memset(dma, 0, sizeof(*dma));
}

static int
override_addr(device_t dev, const char *resname, int *base)
{
	int unit = device_get_unit(dev);
	const char *resval;

	/* XXX warn for wrong hint type */
	if (resource_string_value("npe", unit, resname, &resval) != 0)
		return 0;
	switch (resval[0]) {
	case 'A':
		*base = IXP435_MAC_A_HWBASE;
		break;
	case 'B':
		*base = IXP425_MAC_B_HWBASE;
		break;
	case 'C':
		*base = IXP425_MAC_C_HWBASE;
		break;
	default:
		device_printf(dev, "Warning, bad value %s for "
		    "npe.%d.%s ignored\n", resval, unit, resname);
		return 0;
	}
	if (bootverbose)
		device_printf(dev, "using npe.%d.%s=%s override\n",
		    unit, resname, resval);
	return 1;
}

static int
override_npeid(device_t dev, const char *resname, int *npeid)
{
	int unit = device_get_unit(dev);
	const char *resval;

	/* XXX warn for wrong hint type */
	if (resource_string_value("npe", unit, resname, &resval) != 0)
		return 0;
	switch (resval[0]) {
	case 'A': *npeid = NPE_A; break;
	case 'B': *npeid = NPE_B; break;
	case 'C': *npeid = NPE_C; break;
	default:
		device_printf(dev, "Warning, bad value %s for "
		    "npe.%d.%s ignored\n", resval, unit, resname);
		return 0;
	}
	if (bootverbose)
		device_printf(dev, "using npe.%d.%s=%s override\n",
		    unit, resname, resval);
	return 1;
}

static int
override_unit(device_t dev, const char *resname, int *val, int min, int max)
{
	int unit = device_get_unit(dev);
	int resval;

	if (resource_int_value("npe", unit, resname, &resval) != 0)
		return 0;
	if (!(min <= resval && resval <= max)) {
		device_printf(dev, "Warning, bad value %d for npe.%d.%s "
		    "ignored (value must be [%d-%d])\n", resval, unit,
		    resname, min, max);
		return 0;
	}
	if (bootverbose)
		device_printf(dev, "using npe.%d.%s=%d override\n",
		    unit, resname, resval);
	*val = resval;
	return 1;
}

static void
npe_mac_reset(struct npe_softc *sc)
{
	/*
	 * Reset MAC core.
	 */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
	DELAY(NPE_MAC_RESET_DELAY);
	/* configure MAC to generate MDC clock */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);
}

static int
npe_activate(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	int error, i, macbase, miibase;

	/*
	 * Setup NPE ID, MAC, and MII bindings. We allow override
	 * via hints to handle unexpected board configs.
	 */
	if (!override_npeid(dev, "npeid", &sc->sc_npeid))
		sc->sc_npeid = unit2npeid(device_get_unit(dev));
	sc->sc_npe = ixpnpe_attach(dev, sc->sc_npeid);
	if (sc->sc_npe == NULL) {
		device_printf(dev, "cannot attach ixpnpe\n");
		return EIO;		/* XXX */
	}

	/* MAC */
	if (!override_addr(dev, "mac", &macbase))
		macbase = npeconfig[sc->sc_npeid].macbase;
	device_printf(sc->sc_dev, "MAC at 0x%x\n", macbase);
	if (bus_space_map(sc->sc_iot, macbase, IXP425_REG_SIZE, 0, &sc->sc_ioh)) {
		device_printf(dev, "cannot map mac registers 0x%x:0x%x\n",
		    macbase, IXP425_REG_SIZE);
		return ENOMEM;
	}

	/* PHY */
	if (!override_unit(dev, "phy", &sc->sc_phy, 0, MII_NPHY-1))
		sc->sc_phy = npeconfig[sc->sc_npeid].phy;
	if (!override_addr(dev, "mii", &miibase))
		miibase = npeconfig[sc->sc_npeid].miibase;
	device_printf(sc->sc_dev, "MII at 0x%x\n", miibase);
	if (miibase != macbase) {
		/*
		 * PHY is mapped through a different MAC, setup an
		 * additional mapping for frobbing the PHY registers.
		 */
		if (bus_space_map(sc->sc_iot, miibase, IXP425_REG_SIZE, 0, &sc->sc_miih)) {
			device_printf(dev,
			    "cannot map MII registers 0x%x:0x%x\n",
			    miibase, IXP425_REG_SIZE);
			return ENOMEM;
		}
	} else
		sc->sc_miih = sc->sc_ioh;

	/*
	 * Load NPE firmware and start it running.
	 */
	error = ixpnpe_init(sc->sc_npe);
	if (error != 0) {
		device_printf(dev, "cannot init NPE (error %d)\n", error);
		return error;
	}

	/* probe for PHY */
	if (mii_phy_probe(dev, &sc->sc_mii, npe_ifmedia_update, npe_ifmedia_status)) {
		device_printf(dev, "cannot find PHY %d.\n", sc->sc_phy);
		return ENXIO;
	}

	error = npe_dma_setup(sc, &sc->txdma, "tx", npe_txbuf, NPE_MAXSEG);
	if (error != 0)
		return error;
	error = npe_dma_setup(sc, &sc->rxdma, "rx", npe_rxbuf, 1);
	if (error != 0)
		return error;

	/* setup statistics block */
	error = bus_dma_tag_create(ixp425_softc->sc_dmat, sizeof(uint32_t), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct npestats), 1, sizeof(struct npestats), 0,
	    busdma_lock_mutex, &sc->sc_mtx, &sc->sc_stats_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to create stats tag, "
		    "error %u\n", error);
		return error;
	}
	error = bus_dmamem_alloc(sc->sc_stats_tag, (void **)&sc->sc_stats,
	    BUS_DMA_NOWAIT, &sc->sc_stats_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to allocate memory for stats block, error %u\n",
		    error);
		return error;
	}
	error = bus_dmamap_load(sc->sc_stats_tag, sc->sc_stats_map,
	    sc->sc_stats, sizeof(struct npestats), npe_getaddr, sc, 0);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to load memory for stats block, error %u\n",
		    error);
		return error;
	}
	sc->sc_stats_phys = sc->buf_phys;

	/*
	 * Setup h/w rx/tx queues. There are four q's:
	 *	rx	inbound q of rx'd frames
	 *	rx_free	pool of ixpbuf's for receiving frames
	 *	tx	outbound q of frames to send
	 *	tx_done	q of tx frames that have been processed
	 *
	 * The NPE handles the actual tx/rx process and the q manager
	 * handles the queues. The driver just writes entries to the
	 * q manager mailbox's and gets callbacks when there are rx'd
	 * frames to process or tx'd frames to reap. These callbacks
	 * are controlled by the q configurations; e.g. we get a
	 * callback when tx_done has 2 or more frames to process and
	 * when the rx q has at least one frame. These settings can
	 * be changed at the time the q is configured.
	 */
	sc->rx_qid = npeconfig[sc->sc_npeid].rx_qid;
	ixpqmgr_qconfig(sc->rx_qid, npe_rxbuf, 0, 1,
	    IX_QMGR_Q_SOURCE_ID_NOT_E, (qconfig_hand_t *)npe_rxdone, sc);
	sc->rx_freeqid = npeconfig[sc->sc_npeid].rx_freeqid;
	ixpqmgr_qconfig(sc->rx_freeqid, npe_rxbuf, 0, npe_rxbuf/2, 0, NULL, sc);
	/*
	 * Setup the NPE to direct all traffic to rx_qid.
	 * When QoS is enabled in the firmware there are
	 * 8 traffic classes; otherwise just 4.
	 */
	for (i = 0; i < 8; i++)
		npe_setrxqosentry(sc, i, 0, sc->rx_qid);

	/* disable firewall mode just in case (should be off) */
	npe_setfirewallmode(sc, 0);

	sc->tx_qid = npeconfig[sc->sc_npeid].tx_qid;
	sc->tx_doneqid = npeconfig[sc->sc_npeid].tx_doneqid;
	ixpqmgr_qconfig(sc->tx_qid, npe_txbuf, 0, npe_txbuf, 0, NULL, sc);
	if (tx_doneqid == -1) {
		ixpqmgr_qconfig(sc->tx_doneqid, npe_txbuf, 0, 2,
		    IX_QMGR_Q_SOURCE_ID_NOT_E, npe_txdone, sc);
		tx_doneqid = sc->tx_doneqid;
	}

	KASSERT(npes[sc->sc_npeid] == NULL,
	    ("npe %u already setup", sc->sc_npeid));
	npes[sc->sc_npeid] = sc;

	return 0;
}

static void
npe_deactivate(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);

	npes[sc->sc_npeid] = NULL;

	/* XXX disable q's */
	if (sc->sc_npe != NULL) {
		ixpnpe_stop(sc->sc_npe);
		ixpnpe_detach(sc->sc_npe);
	}
	if (sc->sc_stats != NULL) {
		bus_dmamap_unload(sc->sc_stats_tag, sc->sc_stats_map);
		bus_dmamem_free(sc->sc_stats_tag, sc->sc_stats,
		    sc->sc_stats_map);
	}
	if (sc->sc_stats_tag != NULL)
		bus_dma_tag_destroy(sc->sc_stats_tag);
	npe_dma_destroy(sc, &sc->txdma);
	npe_dma_destroy(sc, &sc->rxdma);
	bus_generic_detach(sc->sc_dev);
	if (sc->sc_mii != NULL)
		device_delete_child(sc->sc_dev, sc->sc_mii);
}

/*
 * Change media according to request.
 */
static int
npe_ifmedia_update(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->sc_mii);
	NPE_LOCK(sc);
	mii_mediachg(mii);
	/* XXX push state ourself? */
	NPE_UNLOCK(sc);
	return (0);
}

/*
 * Notify the world which media we're using.
 */
static void
npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct npe_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->sc_mii);
	NPE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	NPE_UNLOCK(sc);
}

static void
npe_addstats(struct npe_softc *sc)
{
#define	NPEADD(x)	sc->sc_totals.x += be32toh(ns->x)
#define	MIBADD(x) do { sc->mibdata.x += be32toh(ns->x); NPEADD(x); } while (0)
	struct ifnet *ifp = sc->sc_ifp;
	struct npestats *ns = sc->sc_stats;

	MIBADD(dot3StatsAlignmentErrors);
	MIBADD(dot3StatsFCSErrors);
	MIBADD(dot3StatsInternalMacReceiveErrors);
	NPEADD(RxOverrunDiscards);
	NPEADD(RxLearnedEntryDiscards);
	NPEADD(RxLargeFramesDiscards);
	NPEADD(RxSTPBlockedDiscards);
	NPEADD(RxVLANTypeFilterDiscards);
	NPEADD(RxVLANIdFilterDiscards);
	NPEADD(RxInvalidSourceDiscards);
	NPEADD(RxBlackListDiscards);
	NPEADD(RxWhiteListDiscards);
	NPEADD(RxUnderflowEntryDiscards);
	MIBADD(dot3StatsSingleCollisionFrames);
	MIBADD(dot3StatsMultipleCollisionFrames);
	MIBADD(dot3StatsDeferredTransmissions);
	MIBADD(dot3StatsLateCollisions);
	MIBADD(dot3StatsExcessiveCollisions);
	MIBADD(dot3StatsInternalMacTransmitErrors);
	MIBADD(dot3StatsCarrierSenseErrors);
	NPEADD(TxLargeFrameDiscards);
	NPEADD(TxVLANIdFilterDiscards);

	sc->mibdata.dot3StatsFrameTooLongs +=
	      be32toh(ns->RxLargeFramesDiscards)
	    + be32toh(ns->TxLargeFrameDiscards);
	sc->mibdata.dot3StatsMissedFrames +=
	      be32toh(ns->RxOverrunDiscards)
	    + be32toh(ns->RxUnderflowEntryDiscards);

	ifp->if_oerrors +=
	      be32toh(ns->dot3StatsInternalMacTransmitErrors)
	    + be32toh(ns->dot3StatsCarrierSenseErrors)
	    + be32toh(ns->TxVLANIdFilterDiscards)
	    ;
	ifp->if_ierrors += be32toh(ns->dot3StatsFCSErrors)
	    + be32toh(ns->dot3StatsInternalMacReceiveErrors)
	    + be32toh(ns->RxOverrunDiscards)
	    + be32toh(ns->RxUnderflowEntryDiscards)
	    ;
	ifp->if_collisions +=
	      be32toh(ns->dot3StatsSingleCollisionFrames)
	    + be32toh(ns->dot3StatsMultipleCollisionFrames)
	    ;
#undef NPEADD
#undef MIBADD
}

static void
npe_tick(void *xsc)
{
#define	ACK	(NPE_RESETSTATS << NPE_MAC_MSGID_SHL)
	struct npe_softc *sc = xsc;
	struct mii_data *mii = device_get_softc(sc->sc_mii);
	uint32_t msg[2];

	NPE_ASSERT_LOCKED(sc);

	/*
	 * NB: to avoid sleeping with the softc lock held we
	 * split the NPE msg processing into two parts. The
	 * request for statistics is sent w/o waiting for a
	 * reply and then on the next tick we retrieve the
	 * results. This works because npe_tick is the only
	 * code that talks via the mailbox's (except at setup).
	 * This likely can be handled better.
	 */
	if (ixpnpe_recvmsg_async(sc->sc_npe, msg) == 0 && msg[0] == ACK) {
		bus_dmamap_sync(sc->sc_stats_tag, sc->sc_stats_map,
		    BUS_DMASYNC_POSTREAD);
		npe_addstats(sc);
	}
	npe_updatestats(sc);
	mii_tick(mii);

	npewatchdog(sc);

	/* schedule next poll */
	callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc);
#undef ACK
}

static void
npe_setmac(struct npe_softc *sc, u_char *eaddr)
{
	WR4(sc, NPE_MAC_UNI_ADDR_1, eaddr[0]);
	WR4(sc, NPE_MAC_UNI_ADDR_2, eaddr[1]);
	WR4(sc, NPE_MAC_UNI_ADDR_3, eaddr[2]);
	WR4(sc, NPE_MAC_UNI_ADDR_4, eaddr[3]);
	WR4(sc, NPE_MAC_UNI_ADDR_5, eaddr[4]);
	WR4(sc, NPE_MAC_UNI_ADDR_6, eaddr[5]);
}

static void
npe_getmac(struct npe_softc *sc, u_char *eaddr)
{
	/* NB: the unicast address appears to be loaded from EEPROM on reset */
	eaddr[0] = RD4(sc, NPE_MAC_UNI_ADDR_1) & 0xff;
	eaddr[1] = RD4(sc, NPE_MAC_UNI_ADDR_2) & 0xff;
	eaddr[2] = RD4(sc, NPE_MAC_UNI_ADDR_3) & 0xff;
	eaddr[3] = RD4(sc, NPE_MAC_UNI_ADDR_4) & 0xff;
	eaddr[4] = RD4(sc, NPE_MAC_UNI_ADDR_5) & 0xff;
	eaddr[5] = RD4(sc, NPE_MAC_UNI_ADDR_6) & 0xff;
}

struct txdone {
	struct npebuf *head;
	struct npebuf **tail;
	int count;
};

static __inline void
npe_txdone_finish(struct npe_softc *sc, const struct txdone *td)
{
	struct ifnet *ifp = sc->sc_ifp;

	NPE_LOCK(sc);
	*td->tail = sc->tx_free;
	sc->tx_free = td->head;
	/*
	 * We're no longer busy, so clear the busy flag and call the
	 * start routine to xmit more packets.
	 */
	ifp->if_opackets += td->count;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->npe_watchdog_timer = 0;
	npestart_locked(ifp);
	NPE_UNLOCK(sc);
}

/*
 * Q manager callback on tx done queue. Reap mbufs
 * and return tx buffers to the free list. Finally
 * restart output. Note the microcode has only one
 * txdone q wired into it so we must use the NPE ID
 * returned with each npehwbuf to decide where to
 * send buffers.
 */
static void
npe_txdone(int qid, void *arg)
{
#define	P2V(a, dma) \
	&(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
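	/*
	 * NB: P2V maps a physical npehwbuf address handed back by the
	 * q manager to the associated s/w npebuf; the h/w and s/w
	 * arrays are allocated 1-1 so the byte offset into the h/w
	 * area indexes the s/w array directly.
	 */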
	struct npe_softc *sc0 = arg;
	struct npe_softc *sc;
	struct npebuf *npe;
	struct txdone *td, q[NPE_MAX];
	uint32_t entry;

	q[NPE_A].tail = &q[NPE_A].head; q[NPE_A].count = 0;
	q[NPE_B].tail = &q[NPE_B].head; q[NPE_B].count = 0;
	q[NPE_C].tail = &q[NPE_C].head; q[NPE_C].count = 0;
	/* XXX max # at a time? */
	while (ixpqmgr_qread(qid, &entry) == 0) {
		DPRINTF(sc0, "%s: entry 0x%x NPE %u port %u\n",
		    __func__, entry, NPE_QM_Q_NPE(entry), NPE_QM_Q_PORT(entry));

		sc = npes[NPE_QM_Q_NPE(entry)];
		npe = P2V(NPE_QM_Q_ADDR(entry), &sc->txdma);
		m_freem(npe->ix_m);
		npe->ix_m = NULL;

		td = &q[NPE_QM_Q_NPE(entry)];
		*td->tail = npe;
		td->tail = &npe->ix_next;
		td->count++;
	}

	if (q[NPE_A].count)
		npe_txdone_finish(npes[NPE_A], &q[NPE_A]);
	if (q[NPE_B].count)
		npe_txdone_finish(npes[NPE_B], &q[NPE_B]);
	if (q[NPE_C].count)
		npe_txdone_finish(npes[NPE_C], &q[NPE_C]);
#undef P2V
}

static int
npe_rxbuf_init(struct npe_softc *sc, struct npebuf *npe, struct mbuf *m)
{
	bus_dma_segment_t segs[1];
	struct npedma *dma = &sc->rxdma;
	struct npehwbuf *hw;
	int error, nseg;

	if (m == NULL) {
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			return ENOBUFS;
	}
	KASSERT(m->m_ext.ext_size >= 1536 + ETHER_ALIGN,
	    ("ext_size %d", m->m_ext.ext_size));
	m->m_pkthdr.len = m->m_len = 1536;
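	/*
	 * NB: 1536 is presumably chosen as the smallest multiple of 64
	 * (the NPE requires rx buffer lengths be a multiple of 64, see
	 * below) that still holds a maximum-size VLAN-tagged frame.
	 */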
	/* backload payload and align ip hdr */
	m->m_data = m->m_ext.ext_buf + (m->m_ext.ext_size - (1536+ETHER_ALIGN));
	error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map, m,
	    segs, &nseg, 0);
	if (error != 0) {
		m_freem(m);
		return error;
	}
	hw = npe->ix_hw;
	hw->ix_ne[0].data = htobe32(segs[0].ds_addr);
	/* NB: NPE requires length be a multiple of 64 */
	/* NB: buffer length is shifted in word */
	hw->ix_ne[0].len = htobe32(segs[0].ds_len << 16);
	hw->ix_ne[0].next = 0;
	npe->ix_m = m;
	/* Flush the memory in the mbuf */
	bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREREAD);
	return 0;
}

/*
 * RX q processing for a specific NPE. Claim entries
 * from the hardware queue and pass the frames up the
 * stack. Pass the rx buffers to the free list.
 */
static int
npe_rxdone(int qid, void *arg)
{
#define	P2V(a, dma) \
	&(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
	struct npe_softc *sc = arg;
	struct npedma *dma = &sc->rxdma;
	uint32_t entry;
	int rx_npkts = 0;

	while (ixpqmgr_qread(qid, &entry) == 0) {
		struct npebuf *npe = P2V(NPE_QM_Q_ADDR(entry), dma);
		struct mbuf *m;

		DPRINTF(sc, "%s: entry 0x%x neaddr 0x%x ne_len 0x%x\n",
		    __func__, entry, npe->ix_neaddr, npe->ix_hw->ix_ne[0].len);
		/*
		 * Allocate a new mbuf to replenish the rx buffer.
		 * If doing so fails we drop the rx'd frame so we
		 * can reuse the previous mbuf. When we're able to
		 * allocate a new mbuf dispatch the mbuf w/ rx'd
		 * data up the stack and replace it with the newly
		 * allocated one.
		 */
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m != NULL) {
			struct mbuf *mrx = npe->ix_m;
			struct npehwbuf *hw = npe->ix_hw;
			struct ifnet *ifp = sc->sc_ifp;

			/* Flush mbuf memory for rx'd data */
			bus_dmamap_sync(dma->mtag, npe->ix_map,
			    BUS_DMASYNC_POSTREAD);

			/* XXX flush hw buffer; works now 'cuz coherent */
			/* set m_len etc. per rx frame size */
			mrx->m_len = be32toh(hw->ix_ne[0].len) & 0xffff;
			mrx->m_pkthdr.len = mrx->m_len;
			mrx->m_pkthdr.rcvif = ifp;

			ifp->if_ipackets++;
			ifp->if_input(ifp, mrx);
			rx_npkts++;
		} else {
			/* discard frame and re-use mbuf */
			m = npe->ix_m;
		}
		if (npe_rxbuf_init(sc, npe, m) == 0) {
			/* return npe buf to rx free list */
			ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
		} else {
			/* XXX should not happen */
		}
	}
	return rx_npkts;
#undef P2V
}

#ifdef DEVICE_POLLING
static int
npe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct npe_softc *sc = ifp->if_softc;
	int rx_npkts = 0;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		rx_npkts = npe_rxdone(sc->rx_qid, sc);
		npe_txdone(sc->tx_doneqid, sc);	/* XXX polls both NPE's */
	}
	return rx_npkts;
}
#endif /* DEVICE_POLLING */

static void
npe_startxmit(struct npe_softc *sc)
{
	struct npedma *dma = &sc->txdma;
	int i;

	NPE_ASSERT_LOCKED(sc);
	sc->tx_free = NULL;
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];
		if (npe->ix_m != NULL) {
			/* NB: should not happen */
			device_printf(sc->sc_dev,
			    "%s: free mbuf at entry %u\n", __func__, i);
			m_freem(npe->ix_m);
		}
		npe->ix_m = NULL;
		npe->ix_next = sc->tx_free;
		sc->tx_free = npe;
	}
}

static void
npe_startrecv(struct npe_softc *sc)
{
	struct npedma *dma = &sc->rxdma;
	struct npebuf *npe;
	int i;

	NPE_ASSERT_LOCKED(sc);
	for (i = 0; i < dma->nbuf; i++) {
		npe = &dma->buf[i];
		npe_rxbuf_init(sc, npe, npe->ix_m);
		/* set npe buf on rx free list */
		ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
	}
}

/*
 * Reset and initialize the chip
 */
static void
npeinit_locked(void *xsc)
{
	struct npe_softc *sc = xsc;
	struct ifnet *ifp = sc->sc_ifp;

	NPE_ASSERT_LOCKED(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;		/* XXX */

	/*
	 * Reset MAC core.
	 */
	npe_mac_reset(sc);

	/* disable transmitter and receiver in the MAC */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);

	/*
	 * Set the MAC core registers.
	 */
	WR4(sc, NPE_MAC_INT_CLK_THRESH, 0x1);	/* clock ratio: for ixp4xx */
	WR4(sc, NPE_MAC_TX_CNTRL2, 0xf);	/* max retries */
	WR4(sc, NPE_MAC_RANDOM_SEED, 0x8);	/* LFSR back-off seed */
	/* thresholds determined by NPE firmware FS */
	WR4(sc, NPE_MAC_THRESH_P_EMPTY, 0x12);
	WR4(sc, NPE_MAC_THRESH_P_FULL, 0x30);
	WR4(sc, NPE_MAC_BUF_SIZE_TX, 0x8);	/* tx fifo threshold (bytes) */
	WR4(sc, NPE_MAC_TX_DEFER, 0x15);	/* for single deferral */
	WR4(sc, NPE_MAC_RX_DEFER, 0x16);	/* deferral on inter-frame gap */
	WR4(sc, NPE_MAC_TX_TWO_DEFER_1, 0x8);	/* for 2-part deferral */
	WR4(sc, NPE_MAC_TX_TWO_DEFER_2, 0x7);	/* for 2-part deferral */
	WR4(sc, NPE_MAC_SLOT_TIME, 0x80);	/* assumes MII mode */

	WR4(sc, NPE_MAC_TX_CNTRL1,
	      NPE_TX_CNTRL1_RETRY	/* retry failed xmits */
	    | NPE_TX_CNTRL1_FCS_EN	/* append FCS */
	    | NPE_TX_CNTRL1_2DEFER	/* 2-part deferral */
	    | NPE_TX_CNTRL1_PAD_EN);	/* pad runt frames */
	/* XXX pad strip? */
	/* ena pause frame handling */
	WR4(sc, NPE_MAC_RX_CNTRL1, NPE_RX_CNTRL1_PAUSE_EN);
	WR4(sc, NPE_MAC_RX_CNTRL2, 0);

	npe_setmac(sc, IF_LLADDR(ifp));
	npe_setportaddress(sc, IF_LLADDR(ifp));
	npe_setmcast(sc);

	npe_startxmit(sc);
	npe_startrecv(sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->npe_watchdog_timer = 0;		/* just in case */

	/* enable transmitter and receiver in the MAC */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) | NPE_RX_CNTRL1_RX_EN);
	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) | NPE_TX_CNTRL1_TX_EN);

	callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc);
}

static void
npeinit(void *xsc)
{
	struct npe_softc *sc = xsc;
	NPE_LOCK(sc);
	npeinit_locked(sc);
	NPE_UNLOCK(sc);
}

/*
 * Dequeue packets and place on the h/w transmit queue.
 */
static void
npestart_locked(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;
	struct npebuf *npe;
	struct npehwbuf *hw;
	struct mbuf *m, *n;
	struct npedma *dma = &sc->txdma;
	bus_dma_segment_t segs[NPE_MAXSEG];
	int nseg, len, error, i;
	uint32_t next;

	NPE_ASSERT_LOCKED(sc);
	/* XXX can this happen? */
	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	while (sc->tx_free != NULL) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			/* XXX? */
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
			return;
		}
		npe = sc->tx_free;
		error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map,
		    m, segs, &nseg, 0);
		if (error == EFBIG) {
			n = m_collapse(m, M_DONTWAIT, NPE_MAXSEG);
			if (n == NULL) {
				if_printf(ifp, "%s: too many fragments %u\n",
				    __func__, nseg);
				m_freem(m);
				return;	/* XXX? */
			}
			m = n;
			error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map,
			    m, segs, &nseg, 0);
		}
		if (error != 0 || nseg == 0) {
			if_printf(ifp, "%s: error %u nseg %u\n",
			    __func__, error, nseg);
			m_freem(m);
			return;	/* XXX? */
		}
		sc->tx_free = npe->ix_next;

		bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREWRITE);

		/*
		 * Tap off here if there is a bpf listener.
		 */
		BPF_MTAP(ifp, m);

		npe->ix_m = m;
		hw = npe->ix_hw;
		len = m->m_pkthdr.len;
		next = npe->ix_neaddr + sizeof(hw->ix_ne[0]);
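		/*
		 * Build the h/w scatter/gather list: each entry holds
		 * the segment physical address, the segment length in
		 * the high 16 bits (the total frame length rides in
		 * the low 16 bits of the first entry only), and a link
		 * to the next entry; the last link is zeroed below.
		 */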
		for (i = 0; i < nseg; i++) {
			hw->ix_ne[i].data = htobe32(segs[i].ds_addr);
			hw->ix_ne[i].len = htobe32((segs[i].ds_len<<16) | len);
			hw->ix_ne[i].next = htobe32(next);

			len = 0;		/* zero for segments > 1 */
			next += sizeof(hw->ix_ne[0]);
		}
		hw->ix_ne[i-1].next = 0;	/* zero last in chain */
		/* XXX flush descriptor instead of using uncached memory */

		DPRINTF(sc, "%s: qwrite(%u, 0x%x) ne_data %x ne_len 0x%x\n",
		    __func__, sc->tx_qid, npe->ix_neaddr,
		    hw->ix_ne[0].data, hw->ix_ne[0].len);
		/* stick it on the tx q */
		/* XXX add vlan priority */
		ixpqmgr_qwrite(sc->tx_qid, npe->ix_neaddr);

		sc->npe_watchdog_timer = 5;
	}
	if (sc->tx_free == NULL)
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
}

static void
npestart(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;
	NPE_LOCK(sc);
	npestart_locked(ifp);
	NPE_UNLOCK(sc);
}

static void
npe_stopxmit(struct npe_softc *sc)
{
	struct npedma *dma = &sc->txdma;
	int i;

	NPE_ASSERT_LOCKED(sc);

	/* XXX qmgr */
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];

		if (npe->ix_m != NULL) {
			bus_dmamap_unload(dma->mtag, npe->ix_map);
			m_freem(npe->ix_m);
			npe->ix_m = NULL;
		}
	}
}

static void
npe_stoprecv(struct npe_softc *sc)
{
	struct npedma *dma = &sc->rxdma;
	int i;

	NPE_ASSERT_LOCKED(sc);

	/* XXX qmgr */
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];

		if (npe->ix_m != NULL) {
			bus_dmamap_unload(dma->mtag, npe->ix_map);
			m_freem(npe->ix_m);
			npe->ix_m = NULL;
		}
	}
}

/*
 * Turn off interrupts, and stop the nic.
 */
static void
npestop(struct npe_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* disable transmitter and receiver in the MAC */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);

	sc->npe_watchdog_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	callout_stop(&sc->tick_ch);

	npe_stopxmit(sc);
	npe_stoprecv(sc);
	/* XXX go into loopback & drain q's? */
	/* XXX but beware of disabling tx above */

	/*
	 * The MAC core rx/tx disable may leave the MAC hardware in an
	 * unpredictable state. A hw reset is executed before resetting
	 * all the MAC parameters to a known value.
	 */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
	DELAY(NPE_MAC_RESET_DELAY);
	WR4(sc, NPE_MAC_INT_CLK_THRESH, NPE_MAC_INT_CLK_THRESH_DEFAULT);
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);
}

static void
npewatchdog(struct npe_softc *sc)
{
	NPE_ASSERT_LOCKED(sc);

	if (sc->npe_watchdog_timer == 0 || --sc->npe_watchdog_timer != 0)
		return;

	device_printf(sc->sc_dev, "watchdog timeout\n");
	sc->sc_ifp->if_oerrors++;

	npeinit_locked(sc);
}

static int
npeioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct npe_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0;
#ifdef DEVICE_POLLING
	int mask;
#endif

	switch (cmd) {
	case SIOCSIFFLAGS:
		NPE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			npestop(sc);
		} else {
			/* reinitialize card on any parameter change */
			npeinit_locked(sc);
		}
		NPE_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* update multicast filter list. */
		NPE_LOCK(sc);
		npe_setmcast(sc);
		NPE_UNLOCK(sc);
		error = 0;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_mii);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

#ifdef DEVICE_POLLING
	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(npe_poll, ifp);
				if (error)
					return error;
				NPE_LOCK(sc);
				/* disable callbacks XXX txdone is shared */
				ixpqmgr_notify_disable(sc->rx_qid);
				ixpqmgr_notify_disable(sc->tx_doneqid);
				ifp->if_capenable |= IFCAP_POLLING;
				NPE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* NB: always enable qmgr callbacks */
				NPE_LOCK(sc);
				/* enable qmgr callbacks */
				ixpqmgr_notify_enable(sc->rx_qid,
				    IX_QMGR_Q_SOURCE_ID_NOT_E);
				ixpqmgr_notify_enable(sc->tx_doneqid,
				    IX_QMGR_Q_SOURCE_ID_NOT_E);
				ifp->if_capenable &= ~IFCAP_POLLING;
				NPE_UNLOCK(sc);
			}
		}
		break;
#endif
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}

/*
 * Setup a traffic class -> rx queue mapping.
 */
static int
npe_setrxqosentry(struct npe_softc *sc, int classix, int trafclass, int qid)
{
	uint32_t msg[2];

	msg[0] = (NPE_SETRXQOSENTRY << 24) | (sc->sc_npeid << 20) | classix;
	msg[1] = (trafclass << 24) | (1 << 23) | (qid << 16) | (qid << 4);
	return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
}

static int
npe_setportaddress(struct npe_softc *sc, const uint8_t mac[ETHER_ADDR_LEN])
{
	uint32_t msg[2];

	msg[0] = (NPE_SETPORTADDRESS << 24)
	       | (sc->sc_npeid << 20)
	       | (mac[0] << 8)
	       | (mac[1] << 0);
	msg[1] = (mac[2] << 24)
	       | (mac[3] << 16)
	       | (mac[4] << 8)
	       | (mac[5] << 0);
	return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
}

static int
npe_setfirewallmode(struct npe_softc *sc, int onoff)
{
	uint32_t msg[2];

	/* XXX honor onoff */
	msg[0] = (NPE_SETFIREWALLMODE << 24) | (sc->sc_npeid << 20);
	msg[1] = 0;
	return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
}

/*
 * Update and reset the statistics in the NPE.
 */
static int
npe_updatestats(struct npe_softc *sc)
{
	uint32_t msg[2];

	msg[0] = NPE_RESETSTATS << NPE_MAC_MSGID_SHL;
	msg[1] = sc->sc_stats_phys;	/* physical address of stat block */
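	/*
	 * NB: sent async; the ACK and the DMA'd stats block are
	 * reaped on the next tick (see npe_tick).
	 */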
	return ixpnpe_sendmsg_async(sc->sc_npe, msg);
}

#if 0
/*
 * Get the current statistics block.
 */
static int
npe_getstats(struct npe_softc *sc)
{
	uint32_t msg[2];

	msg[0] = NPE_GETSTATS << NPE_MAC_MSGID_SHL;
	msg[1] = sc->sc_stats_phys;	/* physical address of stat block */
	return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
}

/*
 * Query the image id of the loaded firmware.
 */
static uint32_t
npe_getimageid(struct npe_softc *sc)
{
	uint32_t msg[2];

	msg[0] = NPE_GETSTATUS << NPE_MAC_MSGID_SHL;
	msg[1] = 0;
	return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg) == 0 ? msg[1] : 0;
}

/*
 * Enable/disable loopback.
 */
static int
npe_setloopback(struct npe_softc *sc, int ena)
{
	uint32_t msg[2];

	msg[0] = (NPE_SETLOOPBACK << NPE_MAC_MSGID_SHL) | (ena != 0);
	msg[1] = 0;
	return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
}
#endif

static void
npe_child_detached(device_t dev, device_t child)
{
	struct npe_softc *sc;

	sc = device_get_softc(dev);
	if (child == sc->sc_mii)
		sc->sc_mii = NULL;
}

/*
 * MII bus support routines.
 */
#define	MII_RD4(sc, reg)	bus_space_read_4(sc->sc_iot, sc->sc_miih, reg)
#define	MII_WR4(sc, reg, v) \
	bus_space_write_4(sc->sc_iot, sc->sc_miih, reg, v)

static uint32_t
npe_mii_mdio_read(struct npe_softc *sc, int reg)
{
	uint32_t v;

	/* NB: registers are known to be sequential */
	v =  (MII_RD4(sc, reg+0) & 0xff) << 0;
	v |= (MII_RD4(sc, reg+4) & 0xff) << 8;
	v |= (MII_RD4(sc, reg+8) & 0xff) << 16;
	v |= (MII_RD4(sc, reg+12) & 0xff) << 24;
	return v;
}

static void
npe_mii_mdio_write(struct npe_softc *sc, int reg, uint32_t cmd)
{
	/* NB: registers are known to be sequential */
	MII_WR4(sc, reg+0, cmd & 0xff);
	MII_WR4(sc, reg+4, (cmd >> 8) & 0xff);
	MII_WR4(sc, reg+8, (cmd >> 16) & 0xff);
	MII_WR4(sc, reg+12, (cmd >> 24) & 0xff);
}
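
/*
 * NB: each 32-bit MDIO command/status value is spread across four
 * consecutive registers, one byte per register; the helpers above
 * assemble and split values a byte at a time accordingly.
 */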

static int
npe_mii_mdio_wait(struct npe_softc *sc)
{
	uint32_t v;
	int i;

	/* NB: typically this takes 25-30 trips */
	for (i = 0; i < 1000; i++) {
		v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_CMD);
		if ((v & NPE_MII_GO) == 0)
			return 1;
		DELAY(1);
	}
	device_printf(sc->sc_dev, "%s: timeout after ~1ms, cmd 0x%x\n",
	    __func__, v);
	return 0;		/* NB: timeout */
}

static int
npe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct npe_softc *sc = device_get_softc(dev);
	uint32_t v;

	if (phy != sc->sc_phy)		/* XXX no auto-detect */
		return 0xffff;
	v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL) | NPE_MII_GO;
	npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
	if (npe_mii_mdio_wait(sc))
		v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_STS);
	else
		v = 0xffff | NPE_MII_READ_FAIL;
	return (v & NPE_MII_READ_FAIL) ? 0xffff : (v & 0xffff);
}

static int
npe_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct npe_softc *sc = device_get_softc(dev);
	uint32_t v;

	if (phy != sc->sc_phy)		/* XXX */
		return (0);
	v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL)
	  | data | NPE_MII_WRITE
	  | NPE_MII_GO;
	npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
	/* XXX complain about timeout */
	(void) npe_mii_mdio_wait(sc);
	return (0);
}

static void
npe_miibus_statchg(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->sc_mii);
	uint32_t tx1, rx1;

	/* sync MAC duplex state */
	tx1 = RD4(sc, NPE_MAC_TX_CNTRL1);
	rx1 = RD4(sc, NPE_MAC_RX_CNTRL1);
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		tx1 &= ~NPE_TX_CNTRL1_DUPLEX;
		rx1 |= NPE_RX_CNTRL1_PAUSE_EN;
	} else {
		tx1 |= NPE_TX_CNTRL1_DUPLEX;
		rx1 &= ~NPE_RX_CNTRL1_PAUSE_EN;
	}
	WR4(sc, NPE_MAC_RX_CNTRL1, rx1);
	WR4(sc, NPE_MAC_TX_CNTRL1, tx1);
}

static device_method_t npe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		npe_probe),
	DEVMETHOD(device_attach,	npe_attach),
	DEVMETHOD(device_detach,	npe_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_detached,	npe_child_detached),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	npe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	npe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	npe_miibus_statchg),

	{ 0, 0 }
};

static driver_t npe_driver = {
	"npe",
	npe_methods,
	sizeof(struct npe_softc),
};

DRIVER_MODULE(npe, ixp, npe_driver, npe_devclass, 0, 0);
DRIVER_MODULE(miibus, npe, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(npe, ixpqmgr, 1, 1, 1);
MODULE_DEPEND(npe, miibus, 1, 1, 1);
MODULE_DEPEND(npe, ether, 1, 1, 1);