/*-
 * Copyright (c) 2006-2008 Sam Leffler.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Intel XScale NPE Ethernet driver.
 *
 * This driver handles the two ports present on the IXP425.
 * Packet processing is done by the Network Processing Engines
 * (NPE's) that work together with a MAC and PHY. The MAC
 * is also mapped to the XScale cpu; the PHY is accessed via
 * the MAC. NPE-XScale communication happens through h/w
 * queues managed by the Q Manager block.
 *
 * The code here replaces the ethAcc, ethMii, and ethDB classes
 * in the Intel Access Library (IAL) and the OS-specific driver.
 *
 * XXX add vlan support
 */
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <machine/bus.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_mib.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <arm/xscale/ixp425/ixp425reg.h>
#include <arm/xscale/ixp425/ixp425var.h>
#include <arm/xscale/ixp425/ixp425_qmgr.h>
#include <arm/xscale/ixp425/ixp425_npevar.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <arm/xscale/ixp425/if_npereg.h>

#include <machine/armreg.h>

#include "miibus_if.h"

/*
 * XXX: For the main bus dma tag. Can go away if the new method to get the
 * dma tag from the parent got MFC'd into RELENG_6.
 */
extern struct ixp425_softc *ixp425_softc;
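
/*
 * Buffer bookkeeping: each s/w npebuf shadows one npehwbuf that lives
 * in dma-coherent memory shared with the NPE.  The h/w queues carry
 * only the physical address of an npehwbuf (ix_neaddr); npe_txdone and
 * npe_rxdone map that address back to the npebuf (see the P2V macro)
 * to recover the associated mbuf and dma map.
 */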
struct npebuf {
	struct npebuf	*ix_next;	/* chain to next buffer */
	void		*ix_m;		/* backpointer to mbuf */
	bus_dmamap_t	ix_map;		/* bus dma map for associated data */
	struct npehwbuf	*ix_hw;		/* associated h/w block */
	uint32_t	ix_neaddr;	/* phys address of ix_hw */
};

struct npedma {
	const char	*name;
	int		nbuf;		/* # npebuf's allocated */
	bus_dma_tag_t	mtag;		/* bus dma tag for mbuf data */
	struct npehwbuf	*hwbuf;		/* NPE h/w buffers */
	bus_dma_tag_t	buf_tag;	/* tag+map for NPE buffers */
	bus_dmamap_t	buf_map;
	bus_addr_t	buf_phys;	/* phys addr of buffers */
	struct npebuf	*buf;		/* s/w buffers (1-1 w/ h/w) */
};

struct npe_softc {
	/* XXX mii requires this be first; do not move! */
	struct ifnet	*sc_ifp;	/* ifnet pointer */
	struct mtx	sc_mtx;		/* basically a perimeter lock */
	device_t	sc_dev;
	bus_space_tag_t	sc_iot;
	bus_space_handle_t sc_ioh;	/* MAC register window */
	device_t	sc_mii;		/* child miibus */
	bus_space_handle_t sc_miih;	/* MII register window */
	int		sc_npeid;
	struct ixpnpe_softc *sc_npe;	/* NPE support */
	int		sc_debug;	/* DPRINTF* control */
	int		sc_tickinterval;
	struct callout	tick_ch;	/* Tick callout */
	int		npe_watchdog_timer;
	struct npedma	txdma;
	struct npebuf	*tx_free;	/* list of free tx buffers */
	struct npedma	rxdma;
	bus_addr_t	buf_phys;	/* XXX for returning a value */
	int		rx_qid;		/* rx qid */
	int		rx_freeqid;	/* rx free buffers qid */
	int		tx_qid;		/* tx qid */
	int		tx_doneqid;	/* tx completed qid */
	struct ifmib_iso_8802_3 mibdata;
	bus_dma_tag_t	sc_stats_tag;	/* bus dma tag for stats block */
	struct npestats	*sc_stats;
	bus_dmamap_t	sc_stats_map;
	bus_addr_t	sc_stats_phys;	/* phys addr of sc_stats */
	struct npestats	sc_totals;	/* accumulated sc_stats */
};

/*
 * Static configuration for IXP425.  The tx and
 * rx free Q id's are fixed by the NPE microcode.  The
 * rx Q id's are programmed to be separate to simplify
 * multi-port processing.  It may be better to handle
 * all traffic through one Q (as done by the Intel drivers).
 *
 * Note that the PHY's are accessible only from MAC B on the
 * IXP425 and from MAC C on other devices.  This and other
 * platform-specific assumptions are handled with hints.
 */
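/*
 * For example, a board with unusual wiring could override the static
 * table below with loader hints such as (illustrative values only):
 *	hint.npe.0.npeid="B"
 *	hint.npe.0.mac="B"
 *	hint.npe.0.phy="5"
 * See override_npeid, override_addr, and override_unit below.
 */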
static const struct {
	uint32_t	macbase;
	uint32_t	miibase;
	int		phy;		/* phy id */
	uint8_t		rx_qid;
	uint8_t		rx_freeqid;
	uint8_t		tx_qid;
	uint8_t		tx_doneqid;
} npeconfig[NPE_MAX] = {
	[NPE_A] = {
		.macbase	= IXP435_MAC_A_HWBASE,
		.miibase	= IXP425_MAC_C_HWBASE,
		.phy		= 2,
		.rx_qid		= 4,
		.rx_freeqid	= 26,
		.tx_qid		= 23,
		.tx_doneqid	= 31
	},
	[NPE_B] = {
		.macbase	= IXP425_MAC_B_HWBASE,
		.miibase	= IXP425_MAC_B_HWBASE,
		.phy		= 0,
		.rx_qid		= 4,
		.rx_freeqid	= 27,
		.tx_qid		= 24,
		.tx_doneqid	= 31
	},
	[NPE_C] = {
		.macbase	= IXP425_MAC_C_HWBASE,
		.miibase	= IXP425_MAC_B_HWBASE,
		.phy		= 1,
		.rx_qid		= 12,
		.rx_freeqid	= 28,
		.tx_qid		= 25,
		.tx_doneqid	= 31
	},
};
static struct npe_softc *npes[NPE_MAX];	/* NB: indexed by npeid */

static __inline uint32_t
RD4(struct npe_softc *sc, bus_size_t off)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
}

static __inline void
WR4(struct npe_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
}

#define NPE_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define NPE_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define NPE_LOCK_INIT(_sc) \
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->sc_dev), \
	    MTX_NETWORK_LOCK, MTX_DEF)
#define NPE_LOCK_DESTROY(_sc)	mtx_destroy(&_sc->sc_mtx);
#define NPE_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define NPE_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);

static devclass_t npe_devclass;

static int	override_npeid(device_t, const char *resname, int *val);
static int	npe_activate(device_t dev);
static void	npe_deactivate(device_t dev);
static int	npe_ifmedia_update(struct ifnet *ifp);
static void	npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static void	npe_setmac(struct npe_softc *sc, u_char *eaddr);
static void	npe_getmac(struct npe_softc *sc, u_char *eaddr);
static void	npe_txdone(int qid, void *arg);
static int	npe_rxbuf_init(struct npe_softc *, struct npebuf *,
			struct mbuf *);
static int	npe_rxdone(int qid, void *arg);
static void	npeinit(void *);
static void	npestart_locked(struct ifnet *);
static void	npestart(struct ifnet *);
static void	npestop(struct npe_softc *);
static void	npewatchdog(struct npe_softc *);
static int	npeioctl(struct ifnet *ifp, u_long, caddr_t);

static int	npe_setrxqosentry(struct npe_softc *, int classix,
			int trafclass, int qid);
static int	npe_setportaddress(struct npe_softc *, const uint8_t mac[]);
static int	npe_setfirewallmode(struct npe_softc *, int onoff);
static int	npe_updatestats(struct npe_softc *);
#if 0
static int	npe_getstats(struct npe_softc *);
static uint32_t	npe_getimageid(struct npe_softc *);
static int	npe_setloopback(struct npe_softc *, int ena);
#endif

/* NB: all tx done processing goes through one queue */
static int tx_doneqid = -1;

static SYSCTL_NODE(_hw, OID_AUTO, npe, CTLFLAG_RD, 0,
    "IXP4XX NPE driver parameters");

static int npe_debug = 0;
SYSCTL_INT(_hw_npe, OID_AUTO, debug, CTLFLAG_RW, &npe_debug,
    0, "IXP4XX NPE network interface debug msgs");
TUNABLE_INT("hw.npe.debug", &npe_debug);
#define DPRINTF(sc, fmt, ...) do {					\
	if (sc->sc_debug) device_printf(sc->sc_dev, fmt, __VA_ARGS__);	\
} while (0)
#define DPRINTFn(n, sc, fmt, ...) do {					\
	if (sc->sc_debug >= n) device_printf(sc->sc_dev, fmt, __VA_ARGS__);\
} while (0)
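/*
 * NB: the TUNABLE_INT knobs above and below can also be preset from
 * loader.conf, e.g. hw.npe.debug="1" or hw.npe.rxbuf="128"
 * (illustrative values).
 */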
static int npe_tickinterval = 3;	/* npe_tick frequency (secs) */
SYSCTL_INT(_hw_npe, OID_AUTO, tickinterval, CTLFLAG_RD, &npe_tickinterval,
    0, "periodic work interval (secs)");
TUNABLE_INT("hw.npe.tickinterval", &npe_tickinterval);

static int npe_rxbuf = 64;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_npe, OID_AUTO, rxbuf, CTLFLAG_RD, &npe_rxbuf,
    0, "rx buffers allocated");
TUNABLE_INT("hw.npe.rxbuf", &npe_rxbuf);
static int npe_txbuf = 128;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_npe, OID_AUTO, txbuf, CTLFLAG_RD, &npe_txbuf,
    0, "tx buffers allocated");
TUNABLE_INT("hw.npe.txbuf", &npe_txbuf);

static int
unit2npeid(int unit)
{
	static const int npeidmap[2][3] = {
		/* on 425 A is for HSS, B & C are for Ethernet */
		{ NPE_B, NPE_C, -1 },	/* IXP425 */
		/* 435 only has A & C, order C then A */
		{ NPE_C, NPE_A, -1 },	/* IXP435 */
	};
	/* XXX check feature register instead */
	return (unit < 3 ? npeidmap[
	    (cpu_id() & CPU_ID_CPU_MASK) == CPU_ID_IXP435][unit] : -1);
}
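
/*
 * For example: on an IXP425 (cpu_id does not match CPU_ID_IXP435 above)
 * unit 0 maps to NPE_B and unit 1 to NPE_C; on an IXP435 unit 0 maps to
 * NPE_C and unit 1 to NPE_A.
 */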

static int
npe_probe(device_t dev)
{
	static const char *desc[NPE_MAX] = {
		[NPE_A] = "IXP NPE-A",
		[NPE_B] = "IXP NPE-B",
		[NPE_C] = "IXP NPE-C"
	};
	int unit = device_get_unit(dev);
	int npeid;

	if (unit > 2 ||
	    (ixp4xx_read_feature_bits() &
	     (unit == 0 ? EXP_FCTRL_ETH0 : EXP_FCTRL_ETH1)) == 0)
		return EINVAL;

	npeid = -1;
	if (!override_npeid(dev, "npeid", &npeid))
		npeid = unit2npeid(unit);
	if (npeid == -1) {
		device_printf(dev, "unit %d not supported\n", unit);
		return EINVAL;
	}
	device_set_desc(dev, desc[npeid]);
	return 0;
}

static int
npe_attach(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	struct ixp425_softc *sa = device_get_softc(device_get_parent(dev));
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct ifnet *ifp;
	int error;
	u_char eaddr[6];

	sc->sc_dev = dev;
	sc->sc_iot = sa->sc_iot;
	NPE_LOCK_INIT(sc);
	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);
	sc->sc_debug = npe_debug;
	sc->sc_tickinterval = npe_tickinterval;

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet\n");
		error = EIO;		/* XXX */
		goto out;
	}
	/* NB: must be setup prior to invoking mii code */
	sc->sc_ifp = ifp;

	error = npe_activate(dev);
	if (error) {
		device_printf(dev, "cannot activate npe\n");
		goto out;
	}

	npe_getmac(sc, eaddr);

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = npestart;
	ifp->if_ioctl = npeioctl;
	ifp->if_init = npeinit;
	IFQ_SET_MAXLEN(&ifp->if_snd, sc->txdma.nbuf - 1);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_linkmib = &sc->mibdata;
	ifp->if_linkmiblen = sizeof(sc->mibdata);
	sc->mibdata.dot3Compliance = DOT3COMPLIANCE_STATS;
	/* device supports oversized vlan frames */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug",
	    CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tickinterval",
	    CTLFLAG_RW, &sc->sc_tickinterval, 0, "periodic work frequency");
	SYSCTL_ADD_STRUCT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "stats",
	    CTLFLAG_RD, &sc->sc_totals, npestats, "onboard stats");

	ether_ifattach(ifp, eaddr);
	return 0;
out:
	if (ifp != NULL)
		if_free(ifp);
	NPE_LOCK_DESTROY(sc);
	npe_deactivate(dev);
	return error;
}

static int
npe_detach(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = sc->sc_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	npestop(sc);
	if (ifp != NULL) {
		ether_ifdetach(ifp);
		if_free(ifp);
	}
	NPE_LOCK_DESTROY(sc);
	npe_deactivate(dev);
	return 0;
}

/*
 * Compute and install the multicast filter.
 */
static void
npe_setmcast(struct npe_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	uint8_t mask[ETHER_ADDR_LEN], addr[ETHER_ADDR_LEN];
	int i;

	if (ifp->if_flags & IFF_PROMISC) {
		memset(mask, 0, ETHER_ADDR_LEN);
		memset(addr, 0, ETHER_ADDR_LEN);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		static const uint8_t allmulti[ETHER_ADDR_LEN] =
		    { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
		memcpy(mask, allmulti, ETHER_ADDR_LEN);
		memcpy(addr, allmulti, ETHER_ADDR_LEN);
	} else {
		uint8_t clr[ETHER_ADDR_LEN], set[ETHER_ADDR_LEN];
		struct ifmultiaddr *ifma;
		const uint8_t *mac;

		memset(clr, 0, ETHER_ADDR_LEN);
		memset(set, 0xff, ETHER_ADDR_LEN);

		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			mac = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				clr[i] |= mac[i];
				set[i] &= mac[i];
			}
		}
		if_maddr_runlock(ifp);

		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			mask[i] = set[i] | ~clr[i];
			addr[i] = set[i];
		}
	}
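	/*
	 * The loop above computes a (mask, address) pair covering all
	 * subscribed groups: set collects the bits common to every
	 * address, clr the bits set in any of them, so mask keeps only
	 * the bit positions where all addresses agree.  E.g. for
	 * 01:00:5e:00:00:01 and 01:00:5e:00:00:05 this yields mask
	 * ff:ff:ff:ff:ff:fb and address 01:00:5e:00:00:01; presumably
	 * the MAC then accepts a destination dst when (dst & mask)
	 * matches (addr & mask), which here admits exactly those two
	 * addresses (with more groups the filter grows coarser).
	 */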

	/*
	 * Write the mask and address registers.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		WR4(sc, NPE_MAC_ADDR_MASK(i), mask[i]);
		WR4(sc, NPE_MAC_ADDR(i), addr[i]);
	}
}

static void
npe_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct npe_softc *sc;

	if (error != 0)
		return;
	sc = (struct npe_softc *)arg;
	sc->buf_phys = segs[0].ds_addr;
}

static int
npe_dma_setup(struct npe_softc *sc, struct npedma *dma,
	const char *name, int nbuf, int maxseg)
{
	int error, i;

	memset(dma, 0, sizeof(*dma));

	dma->name = name;
	dma->nbuf = nbuf;

	/* DMA tag for mapped mbufs */
	error = bus_dma_tag_create(ixp425_softc->sc_dmat, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES, maxseg, MCLBYTES, 0,
	    busdma_lock_mutex, &sc->sc_mtx, &dma->mtag);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to create %s mbuf dma tag, "
		    "error %u\n", dma->name, error);
		return error;
	}

	/* DMA tag and map for the NPE buffers */
	error = bus_dma_tag_create(ixp425_softc->sc_dmat, sizeof(uint32_t), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    nbuf * sizeof(struct npehwbuf), 1,
	    nbuf * sizeof(struct npehwbuf), 0,
	    busdma_lock_mutex, &sc->sc_mtx, &dma->buf_tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to create %s npebuf dma tag, error %u\n",
		    dma->name, error);
		return error;
	}
	/* XXX COHERENT for now */
	error = bus_dmamem_alloc(dma->buf_tag, (void **)&dma->hwbuf,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &dma->buf_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to allocate memory for %s h/w buffers, error %u\n",
		    dma->name, error);
		return error;
	}
	/* XXX M_TEMP */
	dma->buf = malloc(nbuf * sizeof(struct npebuf), M_TEMP, M_NOWAIT | M_ZERO);
	if (dma->buf == NULL) {
		device_printf(sc->sc_dev,
		    "unable to allocate memory for %s s/w buffers\n",
		    dma->name);
		return ENOMEM;
	}
	error = bus_dmamap_load(dma->buf_tag, dma->buf_map,
	    dma->hwbuf, nbuf*sizeof(struct npehwbuf), npe_getaddr, sc, 0);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to map memory for %s h/w buffers, error %u\n",
		    dma->name, error);
		return error;
	}
	dma->buf_phys = sc->buf_phys;
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];
		struct npehwbuf *hw = &dma->hwbuf[i];

		/* calculate offset to shared area */
		npe->ix_neaddr = dma->buf_phys +
		    ((uintptr_t)hw - (uintptr_t)dma->hwbuf);
		KASSERT((npe->ix_neaddr & 0x1f) == 0,
		    ("ixpbuf misaligned, PA 0x%x", npe->ix_neaddr));
		error = bus_dmamap_create(dma->mtag, BUS_DMA_NOWAIT,
		    &npe->ix_map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "unable to create dmamap for %s buffer %u, "
			    "error %u\n", dma->name, i, error);
			return error;
		}
		npe->ix_hw = hw;
	}
	bus_dmamap_sync(dma->buf_tag, dma->buf_map, BUS_DMASYNC_PREWRITE);
	return 0;
}
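
/*
 * NB: the 32-byte alignment asserted in npe_dma_setup matters because
 * q entries returned by the h/w carry NPE id/port bits alongside the
 * buffer's physical address (see NPE_QM_Q_NPE/NPE_QM_Q_PORT/
 * NPE_QM_Q_ADDR in npe_txdone).  And because the npehwbuf ring is
 * allocated BUS_DMA_COHERENT, descriptor updates elsewhere in the
 * driver get away without explicit cache flushes; the XXX notes mark
 * the spots that would otherwise need them.
 */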

static void
npe_dma_destroy(struct npe_softc *sc, struct npedma *dma)
{
	int i;

	if (dma->hwbuf != NULL) {
		for (i = 0; i < dma->nbuf; i++) {
			struct npebuf *npe = &dma->buf[i];
			bus_dmamap_destroy(dma->mtag, npe->ix_map);
		}
		bus_dmamap_unload(dma->buf_tag, dma->buf_map);
		bus_dmamem_free(dma->buf_tag, dma->hwbuf, dma->buf_map);
	}
	if (dma->buf != NULL)
		free(dma->buf, M_TEMP);
	if (dma->buf_tag)
		bus_dma_tag_destroy(dma->buf_tag);
	if (dma->mtag)
		bus_dma_tag_destroy(dma->mtag);
	memset(dma, 0, sizeof(*dma));
}

static int
override_addr(device_t dev, const char *resname, int *base)
{
	int unit = device_get_unit(dev);
	const char *resval;

	/* XXX warn for wrong hint type */
	if (resource_string_value("npe", unit, resname, &resval) != 0)
		return 0;
	switch (resval[0]) {
	case 'A':
		*base = IXP435_MAC_A_HWBASE;
		break;
	case 'B':
		*base = IXP425_MAC_B_HWBASE;
		break;
	case 'C':
		*base = IXP425_MAC_C_HWBASE;
		break;
	default:
		device_printf(dev, "Warning, bad value %s for "
		    "npe.%d.%s ignored\n", resval, unit, resname);
		return 0;
	}
	if (bootverbose)
		device_printf(dev, "using npe.%d.%s=%s override\n",
		    unit, resname, resval);
	return 1;
}

static int
override_npeid(device_t dev, const char *resname, int *npeid)
{
	int unit = device_get_unit(dev);
	const char *resval;

	/* XXX warn for wrong hint type */
	if (resource_string_value("npe", unit, resname, &resval) != 0)
		return 0;
	switch (resval[0]) {
	case 'A': *npeid = NPE_A; break;
	case 'B': *npeid = NPE_B; break;
	case 'C': *npeid = NPE_C; break;
	default:
		device_printf(dev, "Warning, bad value %s for "
		    "npe.%d.%s ignored\n", resval, unit, resname);
		return 0;
	}
	if (bootverbose)
		device_printf(dev, "using npe.%d.%s=%s override\n",
		    unit, resname, resval);
	return 1;
}

static int
override_unit(device_t dev, const char *resname, int *val, int min, int max)
{
	int unit = device_get_unit(dev);
	int resval;

	if (resource_int_value("npe", unit, resname, &resval) != 0)
		return 0;
	if (!(min <= resval && resval <= max)) {
		device_printf(dev, "Warning, bad value %d for npe.%d.%s "
		    "ignored (value must be [%d-%d])\n", resval, unit,
		    resname, min, max);
		return 0;
	}
	if (bootverbose)
		device_printf(dev, "using npe.%d.%s=%d override\n",
		    unit, resname, resval);
	*val = resval;
	return 1;
}

static void
npe_mac_reset(struct npe_softc *sc)
{
	/*
	 * Reset MAC core.
	 */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
	DELAY(NPE_MAC_RESET_DELAY);
	/* configure MAC to generate MDC clock */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);
}

static int
npe_activate(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	int error, i, macbase, miibase, phy;

	/*
	 * Setup NPE ID, MAC, and MII bindings.  We allow override
	 * via hints to handle unexpected board configs.
	 */
	if (!override_npeid(dev, "npeid", &sc->sc_npeid))
		sc->sc_npeid = unit2npeid(device_get_unit(dev));
	sc->sc_npe = ixpnpe_attach(dev, sc->sc_npeid);
	if (sc->sc_npe == NULL) {
		device_printf(dev, "cannot attach ixpnpe\n");
		return EIO;		/* XXX */
	}

	/* MAC */
	if (!override_addr(dev, "mac", &macbase))
		macbase = npeconfig[sc->sc_npeid].macbase;
	device_printf(sc->sc_dev, "MAC at 0x%x\n", macbase);
	if (bus_space_map(sc->sc_iot, macbase, IXP425_REG_SIZE, 0, &sc->sc_ioh)) {
		device_printf(dev, "cannot map mac registers 0x%x:0x%x\n",
		    macbase, IXP425_REG_SIZE);
		return ENOMEM;
	}

	/* PHY */
	if (!override_unit(dev, "phy", &phy, 0, MII_NPHY - 1))
		phy = npeconfig[sc->sc_npeid].phy;
	if (!override_addr(dev, "mii", &miibase))
		miibase = npeconfig[sc->sc_npeid].miibase;
	device_printf(sc->sc_dev, "MII at 0x%x\n", miibase);
	if (miibase != macbase) {
		/*
		 * PHY is mapped through a different MAC, setup an
		 * additional mapping for frobbing the PHY registers.
		 */
		if (bus_space_map(sc->sc_iot, miibase, IXP425_REG_SIZE, 0, &sc->sc_miih)) {
			device_printf(dev,
			    "cannot map MII registers 0x%x:0x%x\n",
			    miibase, IXP425_REG_SIZE);
			return ENOMEM;
		}
	} else
		sc->sc_miih = sc->sc_ioh;

	/*
	 * Load NPE firmware and start it running.
	 */
	error = ixpnpe_init(sc->sc_npe);
	if (error != 0) {
		device_printf(dev, "cannot init NPE (error %d)\n", error);
		return error;
	}

	/* attach PHY */
	error = mii_attach(dev, &sc->sc_mii, sc->sc_ifp, npe_ifmedia_update,
	    npe_ifmedia_status, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		return error;
	}

	error = npe_dma_setup(sc, &sc->txdma, "tx", npe_txbuf, NPE_MAXSEG);
	if (error != 0)
		return error;
	error = npe_dma_setup(sc, &sc->rxdma, "rx", npe_rxbuf, 1);
	if (error != 0)
		return error;

	/* setup statistics block */
	error = bus_dma_tag_create(ixp425_softc->sc_dmat, sizeof(uint32_t), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct npestats), 1, sizeof(struct npestats), 0,
	    busdma_lock_mutex, &sc->sc_mtx, &sc->sc_stats_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "unable to create stats tag, "
		    "error %u\n", error);
		return error;
	}
	error = bus_dmamem_alloc(sc->sc_stats_tag, (void **)&sc->sc_stats,
	    BUS_DMA_NOWAIT, &sc->sc_stats_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to allocate memory for stats block, error %u\n",
		    error);
		return error;
	}
	error = bus_dmamap_load(sc->sc_stats_tag, sc->sc_stats_map,
	    sc->sc_stats, sizeof(struct npestats), npe_getaddr, sc, 0);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "unable to load memory for stats block, error %u\n",
		    error);
		return error;
	}
	sc->sc_stats_phys = sc->buf_phys;

	/*
	 * Setup h/w rx/tx queues.  There are four q's:
	 *	rx	inbound q of rx'd frames
	 *	rx_free	pool of ixpbuf's for receiving frames
	 *	tx	outbound q of frames to send
	 *	tx_done	q of tx frames that have been processed
	 *
	 * The NPE handles the actual tx/rx process and the q manager
	 * handles the queues.  The driver just writes entries to the
	 * q manager mailboxes and gets callbacks when there are rx'd
	 * frames to process or tx'd frames to reap.  These callbacks
	 * are controlled by the q configurations; e.g. we get a
	 * callback when tx_done has 2 or more frames to process and
	 * when the rx q has at least one frame.  These settings can
	 * be changed at the time the q is configured.
	 */
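	/*
	 * NB: judging from the configurations below, the watermark
	 * argument passed to ixpqmgr_qconfig (1 for the rx q, 2 for
	 * tx_done) is what sets those callback thresholds.
	 */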
	sc->rx_qid = npeconfig[sc->sc_npeid].rx_qid;
	ixpqmgr_qconfig(sc->rx_qid, npe_rxbuf, 0, 1,
	    IX_QMGR_Q_SOURCE_ID_NOT_E, (qconfig_hand_t *)npe_rxdone, sc);
	sc->rx_freeqid = npeconfig[sc->sc_npeid].rx_freeqid;
	ixpqmgr_qconfig(sc->rx_freeqid, npe_rxbuf, 0, npe_rxbuf/2, 0, NULL, sc);
	/*
	 * Setup the NPE to direct all traffic to rx_qid.
	 * When QoS is enabled in the firmware there are
	 * 8 traffic classes; otherwise just 4.
	 */
	for (i = 0; i < 8; i++)
		npe_setrxqosentry(sc, i, 0, sc->rx_qid);

	/* disable firewall mode just in case (should be off) */
	npe_setfirewallmode(sc, 0);

	sc->tx_qid = npeconfig[sc->sc_npeid].tx_qid;
	sc->tx_doneqid = npeconfig[sc->sc_npeid].tx_doneqid;
	ixpqmgr_qconfig(sc->tx_qid, npe_txbuf, 0, npe_txbuf, 0, NULL, sc);
	if (tx_doneqid == -1) {
		ixpqmgr_qconfig(sc->tx_doneqid, npe_txbuf, 0, 2,
		    IX_QMGR_Q_SOURCE_ID_NOT_E, npe_txdone, sc);
		tx_doneqid = sc->tx_doneqid;
	}

	KASSERT(npes[sc->sc_npeid] == NULL,
	    ("npe %u already setup", sc->sc_npeid));
	npes[sc->sc_npeid] = sc;

	return 0;
}

static void
npe_deactivate(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);

	npes[sc->sc_npeid] = NULL;

	/* XXX disable q's */
	if (sc->sc_npe != NULL) {
		ixpnpe_stop(sc->sc_npe);
		ixpnpe_detach(sc->sc_npe);
	}
	if (sc->sc_stats != NULL) {
		bus_dmamap_unload(sc->sc_stats_tag, sc->sc_stats_map);
		bus_dmamem_free(sc->sc_stats_tag, sc->sc_stats,
		    sc->sc_stats_map);
	}
	if (sc->sc_stats_tag != NULL)
		bus_dma_tag_destroy(sc->sc_stats_tag);
	npe_dma_destroy(sc, &sc->txdma);
	npe_dma_destroy(sc, &sc->rxdma);
	bus_generic_detach(sc->sc_dev);
	if (sc->sc_mii != NULL)
		device_delete_child(sc->sc_dev, sc->sc_mii);
}

/*
 * Change media according to request.
 */
static int
npe_ifmedia_update(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->sc_mii);
	NPE_LOCK(sc);
	mii_mediachg(mii);
	/* XXX push state ourself? */
	NPE_UNLOCK(sc);
	return (0);
}

/*
 * Notify the world which media we're using.
 */
static void
npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct npe_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->sc_mii);
	NPE_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	NPE_UNLOCK(sc);
}

static void
npe_addstats(struct npe_softc *sc)
{
#define NPEADD(x)	sc->sc_totals.x += be32toh(ns->x)
#define MIBADD(x) do { sc->mibdata.x += be32toh(ns->x); NPEADD(x); } while (0)
	struct ifnet *ifp = sc->sc_ifp;
	struct npestats *ns = sc->sc_stats;

	MIBADD(dot3StatsAlignmentErrors);
	MIBADD(dot3StatsFCSErrors);
	MIBADD(dot3StatsInternalMacReceiveErrors);
	NPEADD(RxOverrunDiscards);
	NPEADD(RxLearnedEntryDiscards);
	NPEADD(RxLargeFramesDiscards);
	NPEADD(RxSTPBlockedDiscards);
	NPEADD(RxVLANTypeFilterDiscards);
	NPEADD(RxVLANIdFilterDiscards);
	NPEADD(RxInvalidSourceDiscards);
	NPEADD(RxBlackListDiscards);
	NPEADD(RxWhiteListDiscards);
	NPEADD(RxUnderflowEntryDiscards);
	MIBADD(dot3StatsSingleCollisionFrames);
	MIBADD(dot3StatsMultipleCollisionFrames);
	MIBADD(dot3StatsDeferredTransmissions);
	MIBADD(dot3StatsLateCollisions);
	MIBADD(dot3StatsExcessiveCollisions);
	MIBADD(dot3StatsInternalMacTransmitErrors);
	MIBADD(dot3StatsCarrierSenseErrors);
	NPEADD(TxLargeFrameDiscards);
	NPEADD(TxVLANIdFilterDiscards);

	sc->mibdata.dot3StatsFrameTooLongs +=
	      be32toh(ns->RxLargeFramesDiscards)
	    + be32toh(ns->TxLargeFrameDiscards);
	sc->mibdata.dot3StatsMissedFrames +=
	      be32toh(ns->RxOverrunDiscards)
	    + be32toh(ns->RxUnderflowEntryDiscards);

	ifp->if_oerrors +=
	      be32toh(ns->dot3StatsInternalMacTransmitErrors)
	    + be32toh(ns->dot3StatsCarrierSenseErrors)
	    + be32toh(ns->TxVLANIdFilterDiscards)
	    ;
	ifp->if_ierrors += be32toh(ns->dot3StatsFCSErrors)
	    + be32toh(ns->dot3StatsInternalMacReceiveErrors)
	    + be32toh(ns->RxOverrunDiscards)
	    + be32toh(ns->RxUnderflowEntryDiscards)
	    ;
	ifp->if_collisions +=
	      be32toh(ns->dot3StatsSingleCollisionFrames)
	    + be32toh(ns->dot3StatsMultipleCollisionFrames)
	    ;
#undef NPEADD
#undef MIBADD
}

static void
npe_tick(void *xsc)
{
#define ACK	(NPE_RESETSTATS << NPE_MAC_MSGID_SHL)
	struct npe_softc *sc = xsc;
	struct mii_data *mii = device_get_softc(sc->sc_mii);
	uint32_t msg[2];

	NPE_ASSERT_LOCKED(sc);

	/*
	 * NB: to avoid sleeping with the softc lock held we
	 * split the NPE msg processing into two parts.  The
	 * request for statistics is sent w/o waiting for a
	 * reply and then on the next tick we retrieve the
	 * results.  This works because npe_tick is the only
	 * code that talks via the mailboxes (except at setup).
	 * This likely can be handled better.
	 */
	if (ixpnpe_recvmsg_async(sc->sc_npe, msg) == 0 && msg[0] == ACK) {
		bus_dmamap_sync(sc->sc_stats_tag, sc->sc_stats_map,
		    BUS_DMASYNC_POSTREAD);
		npe_addstats(sc);
	}
	npe_updatestats(sc);
	mii_tick(mii);

	npewatchdog(sc);

	/* schedule next poll */
	callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc);
#undef ACK
}

static void
npe_setmac(struct npe_softc *sc, u_char *eaddr)
{
	WR4(sc, NPE_MAC_UNI_ADDR_1, eaddr[0]);
	WR4(sc, NPE_MAC_UNI_ADDR_2, eaddr[1]);
	WR4(sc, NPE_MAC_UNI_ADDR_3, eaddr[2]);
	WR4(sc, NPE_MAC_UNI_ADDR_4, eaddr[3]);
	WR4(sc, NPE_MAC_UNI_ADDR_5, eaddr[4]);
	WR4(sc, NPE_MAC_UNI_ADDR_6, eaddr[5]);
}

static void
npe_getmac(struct npe_softc *sc, u_char *eaddr)
{
	/* NB: the unicast address appears to be loaded from EEPROM on reset */
	eaddr[0] = RD4(sc, NPE_MAC_UNI_ADDR_1) & 0xff;
	eaddr[1] = RD4(sc, NPE_MAC_UNI_ADDR_2) & 0xff;
	eaddr[2] = RD4(sc, NPE_MAC_UNI_ADDR_3) & 0xff;
	eaddr[3] = RD4(sc, NPE_MAC_UNI_ADDR_4) & 0xff;
	eaddr[4] = RD4(sc, NPE_MAC_UNI_ADDR_5) & 0xff;
	eaddr[5] = RD4(sc, NPE_MAC_UNI_ADDR_6) & 0xff;
}

struct txdone {
	struct npebuf *head;
	struct npebuf **tail;
	int count;
};

static __inline void
npe_txdone_finish(struct npe_softc *sc, const struct txdone *td)
{
	struct ifnet *ifp = sc->sc_ifp;

	NPE_LOCK(sc);
	*td->tail = sc->tx_free;
	sc->tx_free = td->head;
	/*
	 * We're no longer busy, so clear the busy flag and call the
	 * start routine to xmit more packets.
	 */
	ifp->if_opackets += td->count;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->npe_watchdog_timer = 0;
	npestart_locked(ifp);
	NPE_UNLOCK(sc);
}

/*
 * Q manager callback on tx done queue.  Reap mbufs
 * and return tx buffers to the free list.  Finally
 * restart output.  Note the microcode has only one
 * txdone q wired into it so we must use the NPE ID
 * returned with each npehwbuf to decide where to
 * send buffers.
 */
static void
npe_txdone(int qid, void *arg)
{
#define P2V(a, dma) \
	&(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
	struct npe_softc *sc0 = arg;
	struct npe_softc *sc;
	struct npebuf *npe;
	struct txdone *td, q[NPE_MAX];
	uint32_t entry;

	q[NPE_A].tail = &q[NPE_A].head; q[NPE_A].count = 0;
	q[NPE_B].tail = &q[NPE_B].head; q[NPE_B].count = 0;
	q[NPE_C].tail = &q[NPE_C].head; q[NPE_C].count = 0;
	/* XXX max # at a time? */
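	/*
	 * Each entry read below packs the npehwbuf physical address
	 * together with the originating NPE id and port; P2V converts
	 * the address back to the s/w npebuf so the mbuf and dma map
	 * can be reclaimed.
	 */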
	while (ixpqmgr_qread(qid, &entry) == 0) {
		DPRINTF(sc0, "%s: entry 0x%x NPE %u port %u\n",
		    __func__, entry, NPE_QM_Q_NPE(entry), NPE_QM_Q_PORT(entry));

		sc = npes[NPE_QM_Q_NPE(entry)];
		npe = P2V(NPE_QM_Q_ADDR(entry), &sc->txdma);
		m_freem(npe->ix_m);
		npe->ix_m = NULL;

		td = &q[NPE_QM_Q_NPE(entry)];
		*td->tail = npe;
		td->tail = &npe->ix_next;
		td->count++;
	}

	if (q[NPE_A].count)
		npe_txdone_finish(npes[NPE_A], &q[NPE_A]);
	if (q[NPE_B].count)
		npe_txdone_finish(npes[NPE_B], &q[NPE_B]);
	if (q[NPE_C].count)
		npe_txdone_finish(npes[NPE_C], &q[NPE_C]);
#undef P2V
}

static int
npe_rxbuf_init(struct npe_softc *sc, struct npebuf *npe, struct mbuf *m)
{
	bus_dma_segment_t segs[1];
	struct npedma *dma = &sc->rxdma;
	struct npehwbuf *hw;
	int error, nseg;

	if (m == NULL) {
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			return ENOBUFS;
	}
	KASSERT(m->m_ext.ext_size >= 1536 + ETHER_ALIGN,
	    ("ext_size %d", m->m_ext.ext_size));
	m->m_pkthdr.len = m->m_len = 1536;
	/* backload payload and align ip hdr */
	m->m_data = m->m_ext.ext_buf + (m->m_ext.ext_size - (1536+ETHER_ALIGN));
	error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map, m,
	    segs, &nseg, 0);
	if (error != 0) {
		m_freem(m);
		return error;
	}
	hw = npe->ix_hw;
	hw->ix_ne[0].data = htobe32(segs[0].ds_addr);
	/* NB: NPE requires length be a multiple of 64 */
	/* NB: buffer length is shifted in word */
	hw->ix_ne[0].len = htobe32(segs[0].ds_len << 16);
	hw->ix_ne[0].next = 0;
	npe->ix_m = m;
	/* Flush the memory in the mbuf */
	bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREREAD);
	return 0;
}
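
/*
 * NB: on completion the NPE rewrites the descriptor length word set up
 * above: the buffer length stays in the high 16 bits while the low 16
 * bits come back holding the received frame length, which is what
 * npe_rxdone masks out below to set m_len.
 */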

/*
 * RX q processing for a specific NPE.  Claim entries
 * from the hardware queue and pass the frames up the
 * stack.  Pass the rx buffers to the free list.
 */
static int
npe_rxdone(int qid, void *arg)
{
#define P2V(a, dma) \
	&(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
	struct npe_softc *sc = arg;
	struct npedma *dma = &sc->rxdma;
	uint32_t entry;
	int rx_npkts = 0;

	while (ixpqmgr_qread(qid, &entry) == 0) {
		struct npebuf *npe = P2V(NPE_QM_Q_ADDR(entry), dma);
		struct mbuf *m;

		DPRINTF(sc, "%s: entry 0x%x neaddr 0x%x ne_len 0x%x\n",
		    __func__, entry, npe->ix_neaddr, npe->ix_hw->ix_ne[0].len);
		/*
		 * Allocate a new mbuf to replenish the rx buffer.
		 * If doing so fails we drop the rx'd frame so we
		 * can reuse the previous mbuf.  When we're able to
		 * allocate a new mbuf dispatch the mbuf w/ rx'd
		 * data up the stack and replace it with the newly
		 * allocated one.
		 */
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m != NULL) {
			struct mbuf *mrx = npe->ix_m;
			struct npehwbuf *hw = npe->ix_hw;
			struct ifnet *ifp = sc->sc_ifp;

			/* Flush mbuf memory for rx'd data */
			bus_dmamap_sync(dma->mtag, npe->ix_map,
			    BUS_DMASYNC_POSTREAD);

			/* XXX flush hw buffer; works now 'cuz coherent */
			/* set m_len etc. per rx frame size */
			mrx->m_len = be32toh(hw->ix_ne[0].len) & 0xffff;
			mrx->m_pkthdr.len = mrx->m_len;
			mrx->m_pkthdr.rcvif = ifp;

			ifp->if_ipackets++;
			ifp->if_input(ifp, mrx);
			rx_npkts++;
		} else {
			/* discard frame and re-use mbuf */
			m = npe->ix_m;
		}
		if (npe_rxbuf_init(sc, npe, m) == 0) {
			/* return npe buf to rx free list */
			ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
		} else {
			/* XXX should not happen */
		}
	}
	return rx_npkts;
#undef P2V
}

#ifdef DEVICE_POLLING
static int
npe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct npe_softc *sc = ifp->if_softc;
	int rx_npkts = 0;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		rx_npkts = npe_rxdone(sc->rx_qid, sc);
		npe_txdone(sc->tx_doneqid, sc);	/* XXX polls both NPE's */
	}
	return rx_npkts;
}
#endif /* DEVICE_POLLING */
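
/*
 * NB: when polling is switched on, npeioctl disables the qmgr
 * notifications for rx_qid and the shared tx_doneqid, so npe_poll
 * above becomes the only path that drains those q's.
 */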

static void
npe_startxmit(struct npe_softc *sc)
{
	struct npedma *dma = &sc->txdma;
	int i;

	NPE_ASSERT_LOCKED(sc);
	sc->tx_free = NULL;
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];
		if (npe->ix_m != NULL) {
			/* NB: should not happen */
			device_printf(sc->sc_dev,
			    "%s: free mbuf at entry %u\n", __func__, i);
			m_freem(npe->ix_m);
		}
		npe->ix_m = NULL;
		npe->ix_next = sc->tx_free;
		sc->tx_free = npe;
	}
}

static void
npe_startrecv(struct npe_softc *sc)
{
	struct npedma *dma = &sc->rxdma;
	struct npebuf *npe;
	int i;

	NPE_ASSERT_LOCKED(sc);
	for (i = 0; i < dma->nbuf; i++) {
		npe = &dma->buf[i];
		npe_rxbuf_init(sc, npe, npe->ix_m);
		/* set npe buf on rx free list */
		ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
	}
}

/*
 * Reset and initialize the chip
 */
static void
npeinit_locked(void *xsc)
{
	struct npe_softc *sc = xsc;
	struct ifnet *ifp = sc->sc_ifp;

	NPE_ASSERT_LOCKED(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)	/* XXX */
		return;

	/*
	 * Reset MAC core.
	 */
	npe_mac_reset(sc);

	/* disable transmitter and receiver in the MAC */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);

	/*
	 * Set the MAC core registers.
	 */
	WR4(sc, NPE_MAC_INT_CLK_THRESH, 0x1);	/* clock ratio: for ixp4xx */
	WR4(sc, NPE_MAC_TX_CNTRL2, 0xf);	/* max retries */
	WR4(sc, NPE_MAC_RANDOM_SEED, 0x8);	/* LFSR back-off seed */
	/* thresholds determined by NPE firmware FS */
	WR4(sc, NPE_MAC_THRESH_P_EMPTY, 0x12);
	WR4(sc, NPE_MAC_THRESH_P_FULL, 0x30);
	WR4(sc, NPE_MAC_BUF_SIZE_TX, 0x8);	/* tx fifo threshold (bytes) */
	WR4(sc, NPE_MAC_TX_DEFER, 0x15);	/* for single deferral */
	WR4(sc, NPE_MAC_RX_DEFER, 0x16);	/* deferral on inter-frame gap*/
	WR4(sc, NPE_MAC_TX_TWO_DEFER_1, 0x8);	/* for 2-part deferral */
	WR4(sc, NPE_MAC_TX_TWO_DEFER_2, 0x7);	/* for 2-part deferral */
	WR4(sc, NPE_MAC_SLOT_TIME, 0x80);	/* assumes MII mode */

	WR4(sc, NPE_MAC_TX_CNTRL1,
	      NPE_TX_CNTRL1_RETRY	/* retry failed xmits */
	    | NPE_TX_CNTRL1_FCS_EN	/* append FCS */
	    | NPE_TX_CNTRL1_2DEFER	/* 2-part deferral */
	    | NPE_TX_CNTRL1_PAD_EN);	/* pad runt frames */
	/* XXX pad strip? */
	/* ena pause frame handling */
	WR4(sc, NPE_MAC_RX_CNTRL1, NPE_RX_CNTRL1_PAUSE_EN);
	WR4(sc, NPE_MAC_RX_CNTRL2, 0);

	npe_setmac(sc, IF_LLADDR(ifp));
	npe_setportaddress(sc, IF_LLADDR(ifp));
	npe_setmcast(sc);

	npe_startxmit(sc);
	npe_startrecv(sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->npe_watchdog_timer = 0;	/* just in case */

	/* enable transmitter and receiver in the MAC */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) | NPE_RX_CNTRL1_RX_EN);
	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) | NPE_TX_CNTRL1_TX_EN);

	callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc);
}

static void
npeinit(void *xsc)
{
	struct npe_softc *sc = xsc;
	NPE_LOCK(sc);
	npeinit_locked(sc);
	NPE_UNLOCK(sc);
}
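
/*
 * NB: for a multi-segment packet, npestart_locked below encodes the
 * total frame length in the low 16 bits of the first descriptor's
 * length word (the segment length rides in the high 16 bits) and
 * leaves it zero in the remaining descriptors; descriptors are chained
 * through physical next pointers with the last one zeroed.
 */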

/*
 * Dequeue packets and place on the h/w transmit queue.
 */
static void
npestart_locked(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;
	struct npebuf *npe;
	struct npehwbuf *hw;
	struct mbuf *m, *n;
	struct npedma *dma = &sc->txdma;
	bus_dma_segment_t segs[NPE_MAXSEG];
	int nseg, len, error, i;
	uint32_t next;

	NPE_ASSERT_LOCKED(sc);
	/* XXX can this happen? */
	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	while (sc->tx_free != NULL) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			/* XXX? */
			ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
			return;
		}
		npe = sc->tx_free;
		error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map,
		    m, segs, &nseg, 0);
		if (error == EFBIG) {
			n = m_collapse(m, M_NOWAIT, NPE_MAXSEG);
			if (n == NULL) {
				if_printf(ifp, "%s: too many fragments %u\n",
				    __func__, nseg);
				m_freem(m);
				return;	/* XXX? */
			}
			m = n;
			error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map,
			    m, segs, &nseg, 0);
		}
		if (error != 0 || nseg == 0) {
			if_printf(ifp, "%s: error %u nseg %u\n",
			    __func__, error, nseg);
			m_freem(m);
			return;	/* XXX? */
		}
		sc->tx_free = npe->ix_next;

		bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREWRITE);

		/*
		 * Tap off here if there is a bpf listener.
		 */
		BPF_MTAP(ifp, m);

		npe->ix_m = m;
		hw = npe->ix_hw;
		len = m->m_pkthdr.len;
		next = npe->ix_neaddr + sizeof(hw->ix_ne[0]);
		for (i = 0; i < nseg; i++) {
			hw->ix_ne[i].data = htobe32(segs[i].ds_addr);
			hw->ix_ne[i].len = htobe32((segs[i].ds_len<<16) | len);
			hw->ix_ne[i].next = htobe32(next);

			len = 0;	/* zero for segments > 1 */
			next += sizeof(hw->ix_ne[0]);
		}
		hw->ix_ne[i-1].next = 0;	/* zero last in chain */
		/* XXX flush descriptor instead of using uncached memory */

		DPRINTF(sc, "%s: qwrite(%u, 0x%x) ne_data %x ne_len 0x%x\n",
		    __func__, sc->tx_qid, npe->ix_neaddr,
		    hw->ix_ne[0].data, hw->ix_ne[0].len);
		/* stick it on the tx q */
		/* XXX add vlan priority */
		ixpqmgr_qwrite(sc->tx_qid, npe->ix_neaddr);

		sc->npe_watchdog_timer = 5;
	}
	if (sc->tx_free == NULL)
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
}

void
npestart(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;
	NPE_LOCK(sc);
	npestart_locked(ifp);
	NPE_UNLOCK(sc);
}

static void
npe_stopxmit(struct npe_softc *sc)
{
	struct npedma *dma = &sc->txdma;
	int i;

	NPE_ASSERT_LOCKED(sc);

	/* XXX qmgr */
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];

		if (npe->ix_m != NULL) {
			bus_dmamap_unload(dma->mtag, npe->ix_map);
			m_freem(npe->ix_m);
			npe->ix_m = NULL;
		}
	}
}

static void
npe_stoprecv(struct npe_softc *sc)
{
	struct npedma *dma = &sc->rxdma;
	int i;

	NPE_ASSERT_LOCKED(sc);

	/* XXX qmgr */
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];

		if (npe->ix_m != NULL) {
			bus_dmamap_unload(dma->mtag, npe->ix_map);
			m_freem(npe->ix_m);
			npe->ix_m = NULL;
		}
	}
}

/*
 * Turn off interrupts, and stop the nic.
 */
void
npestop(struct npe_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* disable transmitter and receiver in the MAC */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);

	sc->npe_watchdog_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	callout_stop(&sc->tick_ch);

	npe_stopxmit(sc);
	npe_stoprecv(sc);
	/* XXX go into loopback & drain q's? */
	/* XXX but beware of disabling tx above */

	/*
	 * The MAC core rx/tx disable may leave the MAC hardware in an
	 * unpredictable state.  A hw reset is executed before resetting
	 * all the MAC parameters to a known value.
	 */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
	DELAY(NPE_MAC_RESET_DELAY);
	WR4(sc, NPE_MAC_INT_CLK_THRESH, NPE_MAC_INT_CLK_THRESH_DEFAULT);
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);
}

void
npewatchdog(struct npe_softc *sc)
{
	NPE_ASSERT_LOCKED(sc);

	if (sc->npe_watchdog_timer == 0 || --sc->npe_watchdog_timer != 0)
		return;

	device_printf(sc->sc_dev, "watchdog timeout\n");
	sc->sc_ifp->if_oerrors++;

	npeinit_locked(sc);
}

static int
npeioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct npe_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0;
#ifdef DEVICE_POLLING
	int mask;
#endif

	switch (cmd) {
	case SIOCSIFFLAGS:
		NPE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			npestop(sc);
		} else {
			/* reinitialize card on any parameter change */
			npeinit_locked(sc);
		}
		NPE_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* update multicast filter list. */
		NPE_LOCK(sc);
		npe_setmcast(sc);
		NPE_UNLOCK(sc);
		error = 0;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_mii);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

#ifdef DEVICE_POLLING
	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(npe_poll, ifp);
				if (error)
					return error;
				NPE_LOCK(sc);
				/* disable callbacks XXX txdone is shared */
				ixpqmgr_notify_disable(sc->rx_qid);
				ixpqmgr_notify_disable(sc->tx_doneqid);
				ifp->if_capenable |= IFCAP_POLLING;
				NPE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* NB: always enable qmgr callbacks */
				NPE_LOCK(sc);
				/* enable qmgr callbacks */
				ixpqmgr_notify_enable(sc->rx_qid,
				    IX_QMGR_Q_SOURCE_ID_NOT_E);
				ixpqmgr_notify_enable(sc->tx_doneqid,
				    IX_QMGR_Q_SOURCE_ID_NOT_E);
				ifp->if_capenable &= ~IFCAP_POLLING;
				NPE_UNLOCK(sc);
			}
		}
		break;
#endif
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}

/*
 * Setup a traffic class -> rx queue mapping.
 */
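/*
 * NPE mailbox messages are two 32-bit words; in the routines below the
 * high byte of the first word carries the command (e.g.
 * NPE_SETRXQOSENTRY) with the NPE id and arguments packed beneath it.
 */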
static int
npe_setrxqosentry(struct npe_softc *sc, int classix, int trafclass, int qid)
{
	uint32_t msg[2];

	msg[0] = (NPE_SETRXQOSENTRY << 24) | (sc->sc_npeid << 20) | classix;
	msg[1] = (trafclass << 24) | (1 << 23) | (qid << 16) | (qid << 4);
	return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
}

static int
npe_setportaddress(struct npe_softc *sc, const uint8_t mac[ETHER_ADDR_LEN])
{
	uint32_t msg[2];

	msg[0] = (NPE_SETPORTADDRESS << 24)
	       | (sc->sc_npeid << 20)
	       | (mac[0] << 8)
	       | (mac[1] << 0);
	msg[1] = (mac[2] << 24)
	       | (mac[3] << 16)
	       | (mac[4] << 8)
	       | (mac[5] << 0);
	return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
}

static int
npe_setfirewallmode(struct npe_softc *sc, int onoff)
{
	uint32_t msg[2];

	/* XXX honor onoff */
	msg[0] = (NPE_SETFIREWALLMODE << 24) | (sc->sc_npeid << 20);
	msg[1] = 0;
	return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
}

/*
 * Update and reset the statistics in the NPE.
 */
static int
npe_updatestats(struct npe_softc *sc)
{
	uint32_t msg[2];

	msg[0] = NPE_RESETSTATS << NPE_MAC_MSGID_SHL;
	msg[1] = sc->sc_stats_phys;	/* physical address of stat block */
	return ixpnpe_sendmsg_async(sc->sc_npe, msg);
}

#if 0
/*
 * Get the current statistics block.
 */
static int
npe_getstats(struct npe_softc *sc)
{
	uint32_t msg[2];

	msg[0] = NPE_GETSTATS << NPE_MAC_MSGID_SHL;
	msg[1] = sc->sc_stats_phys;	/* physical address of stat block */
	return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
}

/*
 * Query the image id of the loaded firmware.
 */
static uint32_t
npe_getimageid(struct npe_softc *sc)
{
	uint32_t msg[2];

	msg[0] = NPE_GETSTATUS << NPE_MAC_MSGID_SHL;
	msg[1] = 0;
	return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg) == 0 ? msg[1] : 0;
}

/*
 * Enable/disable loopback.
 */
static int
npe_setloopback(struct npe_softc *sc, int ena)
{
	uint32_t msg[2];

	msg[0] = (NPE_SETLOOPBACK << NPE_MAC_MSGID_SHL) | (ena != 0);
	msg[1] = 0;
	return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg);
}
#endif

static void
npe_child_detached(device_t dev, device_t child)
{
	struct npe_softc *sc;

	sc = device_get_softc(dev);
	if (child == sc->sc_mii)
		sc->sc_mii = NULL;
}

/*
 * MII bus support routines.
 */
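/*
 * NB: the MDIO command/status value is spread across four consecutive
 * byte-wide registers (bits 7:0 first), so the helpers below assemble
 * and disassemble a 32-bit value 8 bits at a time.
 */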
#define MII_RD4(sc, reg)	bus_space_read_4(sc->sc_iot, sc->sc_miih, reg)
#define MII_WR4(sc, reg, v) \
	bus_space_write_4(sc->sc_iot, sc->sc_miih, reg, v)

static uint32_t
npe_mii_mdio_read(struct npe_softc *sc, int reg)
{
	uint32_t v;

	/* NB: registers are known to be sequential */
	v =  (MII_RD4(sc, reg+0) & 0xff) << 0;
	v |= (MII_RD4(sc, reg+4) & 0xff) << 8;
	v |= (MII_RD4(sc, reg+8) & 0xff) << 16;
	v |= (MII_RD4(sc, reg+12) & 0xff) << 24;
	return v;
}

static void
npe_mii_mdio_write(struct npe_softc *sc, int reg, uint32_t cmd)
{
	/* NB: registers are known to be sequential */
	MII_WR4(sc, reg+0, cmd & 0xff);
	MII_WR4(sc, reg+4, (cmd >> 8) & 0xff);
	MII_WR4(sc, reg+8, (cmd >> 16) & 0xff);
	MII_WR4(sc, reg+12, (cmd >> 24) & 0xff);
}

static int
npe_mii_mdio_wait(struct npe_softc *sc)
{
	uint32_t v;
	int i;

	/* NB: typically this takes 25-30 trips */
	for (i = 0; i < 1000; i++) {
		v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_CMD);
		if ((v & NPE_MII_GO) == 0)
			return 1;
		DELAY(1);
	}
	device_printf(sc->sc_dev, "%s: timeout after ~1ms, cmd 0x%x\n",
	    __func__, v);
	return 0;		/* NB: timeout */
}

static int
npe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct npe_softc *sc = device_get_softc(dev);
	uint32_t v;

	v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL) | NPE_MII_GO;
	npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
	if (npe_mii_mdio_wait(sc))
		v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_STS);
	else
		v = 0xffff | NPE_MII_READ_FAIL;
	return (v & NPE_MII_READ_FAIL) ? 0xffff : (v & 0xffff);
}

static int
npe_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct npe_softc *sc = device_get_softc(dev);
	uint32_t v;

	v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL)
	  | data | NPE_MII_WRITE
	  | NPE_MII_GO;
	npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
	/* XXX complain about timeout */
	(void) npe_mii_mdio_wait(sc);
	return (0);
}

static void
npe_miibus_statchg(device_t dev)
{
	struct npe_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->sc_mii);
	uint32_t tx1, rx1;

	/* sync MAC duplex state */
	tx1 = RD4(sc, NPE_MAC_TX_CNTRL1);
	rx1 = RD4(sc, NPE_MAC_RX_CNTRL1);
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		tx1 &= ~NPE_TX_CNTRL1_DUPLEX;
		rx1 |= NPE_RX_CNTRL1_PAUSE_EN;
	} else {
		tx1 |= NPE_TX_CNTRL1_DUPLEX;
		rx1 &= ~NPE_RX_CNTRL1_PAUSE_EN;
	}
	WR4(sc, NPE_MAC_RX_CNTRL1, rx1);
	WR4(sc, NPE_MAC_TX_CNTRL1, tx1);
}

static device_method_t npe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		npe_probe),
	DEVMETHOD(device_attach,	npe_attach),
	DEVMETHOD(device_detach,	npe_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_detached,	npe_child_detached),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	npe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	npe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	npe_miibus_statchg),

	{ 0, 0 }
};

static driver_t npe_driver = {
	"npe",
	npe_methods,
	sizeof(struct npe_softc),
};

DRIVER_MODULE(npe, ixp, npe_driver, npe_devclass, 0, 0);
DRIVER_MODULE(miibus, npe, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(npe, ixpqmgr, 1, 1, 1);
MODULE_DEPEND(npe, miibus, 1, 1, 1);
MODULE_DEPEND(npe, ether, 1, 1, 1);