dp83932.c revision 1.7
1/* $NetBSD: dp83932.c,v 1.7 2003/01/18 13:12:55 tsutsui Exp $ */ 2 3/*- 4 * Copyright (c) 2001 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Jason R. Thorpe. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the NetBSD 21 * Foundation, Inc. and its contributors. 22 * 4. Neither the name of The NetBSD Foundation nor the names of its 23 * contributors may be used to endorse or promote products derived 24 * from this software without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 29 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 36 * POSSIBILITY OF SUCH DAMAGE. 37 */ 38 39/* 40 * Device driver for the National Semiconductor DP83932 41 * Systems-Oriented Network Interface Controller (SONIC). 42 */ 43 44#include <sys/cdefs.h> 45__KERNEL_RCSID(0, "$NetBSD: dp83932.c,v 1.7 2003/01/18 13:12:55 tsutsui Exp $"); 46 47#include "bpfilter.h" 48 49#include <sys/param.h> 50#include <sys/systm.h> 51#include <sys/mbuf.h> 52#include <sys/malloc.h> 53#include <sys/kernel.h> 54#include <sys/socket.h> 55#include <sys/ioctl.h> 56#include <sys/errno.h> 57#include <sys/device.h> 58 59#include <uvm/uvm_extern.h> 60 61#include <net/if.h> 62#include <net/if_dl.h> 63#include <net/if_ether.h> 64 65#if NBPFILTER > 0 66#include <net/bpf.h> 67#endif 68 69#include <machine/bus.h> 70#include <machine/intr.h> 71 72#include <dev/ic/dp83932reg.h> 73#include <dev/ic/dp83932var.h> 74 75void sonic_start(struct ifnet *); 76void sonic_watchdog(struct ifnet *); 77int sonic_ioctl(struct ifnet *, u_long, caddr_t); 78int sonic_init(struct ifnet *); 79void sonic_stop(struct ifnet *, int); 80 81void sonic_shutdown(void *); 82 83void sonic_reset(struct sonic_softc *); 84void sonic_rxdrain(struct sonic_softc *); 85int sonic_add_rxbuf(struct sonic_softc *, int); 86void sonic_set_filter(struct sonic_softc *); 87 88uint16_t sonic_txintr(struct sonic_softc *); 89void sonic_rxintr(struct sonic_softc *); 90 91int sonic_copy_small = 0; 92 93#define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN) 94 95/* 96 * sonic_attach: 97 * 98 * Attach a 
 * SONIC interface to the system.
 */
void
sonic_attach(struct sonic_softc *sc, const uint8_t *enaddr)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, rseg, error;
	bus_dma_segment_t seg;
	size_t cdatasize;
	char *nullbuf;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.  The zero-filled pad buffer (used to pad short
	 * Tx frames up to the minimum Ethernet length) is carved out of
	 * the same DMA-safe allocation, immediately after the control data.
	 */
	if (sc->sc_32bit)
		cdatasize = sizeof(struct sonic_control_data32);
	else
		cdatasize = sizeof(struct sonic_control_data16);

	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdatasize + ETHER_PAD_LEN,
	     PAGE_SIZE, (64 * 1024), &seg, 1, &rseg,
	     BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	     cdatasize + ETHER_PAD_LEN, (caddr_t *) &sc->sc_cdata16,
	     BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}
	nullbuf = (char *)sc->sc_cdata16 + cdatasize;
	memset(nullbuf, 0, ETHER_PAD_LEN);

	if ((error = bus_dmamap_create(sc->sc_dmat,
	     cdatasize, 1, cdatasize, 0, BUS_DMA_NOWAIT,
	     &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	     sc->sc_cdata16, cdatasize, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < SONIC_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		     SONIC_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		     &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < SONIC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		     MCLBYTES, 0, BUS_DMA_NOWAIT,
		     &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].ds_mbuf = NULL;
	}

	/*
	 * Create and map the pad buffer.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
	     ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT, &sc->sc_nulldmamap)) != 0) {
		printf("%s: unable to create pad buffer DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_5;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
	     nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load pad buffer DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_6;
	}
	/*
	 * The pad buffer is never written again, so a single PREWRITE
	 * sync here is sufficient for its whole lifetime.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Reset the chip to a known state.
	 */
	sonic_reset(sc);

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sonic_ioctl;
	ifp->if_start = sonic_start;
	ifp->if_watchdog = sonic_watchdog;
	ifp->if_init = sonic_init;
	ifp->if_stop = sonic_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(sonic_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
 fail_5:
	for (i = 0; i < SONIC_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < SONIC_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t) sc->sc_cdata16, cdatasize);
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * sonic_shutdown:
 *
 * Make sure the interface is stopped at reboot.
 */
void
sonic_shutdown(void *arg)
{
	struct sonic_softc *sc = arg;

	sonic_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * sonic_start: [ifnet interface function]
 *
 * Start packet transmission on the interface.
 */
void
sonic_start(struct ifnet *ifp)
{
	struct sonic_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct sonic_tda16 *tda16;
	struct sonic_tda32 *tda32;
	struct sonic_descsoft *ds;
	bus_dmamap_t dmamap;
	int error, olasttx, nexttx, opending, seg, totlen, olseg;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous txpending and the current "last txdesc
	 * used" index.
	 */
	opending = sc->sc_txpending;
	olasttx = sc->sc_txlast;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.  Leave one at the end for sanity's sake.
	 */
	while (sc->sc_txpending < (SONIC_NTXDESC - 1)) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the next available transmit descriptor.
		 */
		nexttx = SONIC_NEXTTX(sc->sc_txlast);
		ds = &sc->sc_txsoft[nexttx];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.  (A short frame that already uses all frags is
		 * also copied, since padding would need one more frag.)
		 */
		if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		     BUS_DMA_WRITE|BUS_DMA_NOWAIT)) != 0 ||
		    (m0->m_pkthdr.len < ETHER_PAD_LEN &&
		     dmamap->dm_nsegs == SONIC_NTXFRAGS)) {
			if (error == 0)
				bus_dmamap_unload(sc->sc_dmat, dmamap);
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, error);
				m_freem(m);
				break;
			}
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			/* We made a copy; free the original. */
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/*
		 * Initialize the transmit descriptor.  Short frames are
		 * padded to the minimum Ethernet length with an extra
		 * fragment pointing at the pre-zeroed pad buffer.
		 */
		totlen = 0;
		if (sc->sc_32bit) {
			tda32 = &sc->sc_tda32[nexttx];
			for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
				tda32->tda_frags[seg].frag_ptr1 =
				    htosonic32(sc,
				    (dmamap->dm_segs[seg].ds_addr >> 16) &
				    0xffff);
				tda32->tda_frags[seg].frag_ptr0 =
				    htosonic32(sc,
				    dmamap->dm_segs[seg].ds_addr & 0xffff);
				tda32->tda_frags[seg].frag_size =
				    htosonic32(sc, dmamap->dm_segs[seg].ds_len);
				totlen += dmamap->dm_segs[seg].ds_len;
			}
			if (totlen < ETHER_PAD_LEN) {
				tda32->tda_frags[seg].frag_ptr1 =
				    htosonic32(sc,
				    (sc->sc_nulldma >> 16) & 0xffff);
				tda32->tda_frags[seg].frag_ptr0 =
				    htosonic32(sc,
				    sc->sc_nulldma & 0xffff);
				tda32->tda_frags[seg].frag_size =
				    htosonic32(sc, ETHER_PAD_LEN - totlen);
				totlen = ETHER_PAD_LEN;
			}

			tda32->tda_status = 0;
			tda32->tda_pktconfig = 0;
			tda32->tda_pktsize = htosonic32(sc, totlen);
			tda32->tda_fragcnt = htosonic32(sc, seg);

			/* Link it up. */
			tda32->tda_frags[seg].frag_ptr0 =
			    htosonic32(sc, SONIC_CDTXADDR32(sc,
			    SONIC_NEXTTX(nexttx)) & 0xffff);

			/* Sync the Tx descriptor. */
			SONIC_CDTXSYNC32(sc, nexttx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		} else {
			tda16 = &sc->sc_tda16[nexttx];
			for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
				tda16->tda_frags[seg].frag_ptr1 =
				    htosonic16(sc,
				    (dmamap->dm_segs[seg].ds_addr >> 16) &
				    0xffff);
				tda16->tda_frags[seg].frag_ptr0 =
				    htosonic16(sc,
				    dmamap->dm_segs[seg].ds_addr & 0xffff);
				tda16->tda_frags[seg].frag_size =
				    htosonic16(sc, dmamap->dm_segs[seg].ds_len);
				totlen += dmamap->dm_segs[seg].ds_len;
			}
			if (totlen < ETHER_PAD_LEN) {
				tda16->tda_frags[seg].frag_ptr1 =
				    htosonic16(sc,
				    (sc->sc_nulldma >> 16) & 0xffff);
				tda16->tda_frags[seg].frag_ptr0 =
				    htosonic16(sc,
				    sc->sc_nulldma & 0xffff);
				tda16->tda_frags[seg].frag_size =
				    htosonic16(sc, ETHER_PAD_LEN - totlen);
				totlen = ETHER_PAD_LEN;
			}

			tda16->tda_status = 0;
			tda16->tda_pktconfig = 0;
			tda16->tda_pktsize = htosonic16(sc, totlen);
			tda16->tda_fragcnt = htosonic16(sc, seg);

			/* Link it up. */
			tda16->tda_frags[seg].frag_ptr0 =
			    htosonic16(sc, SONIC_CDTXADDR16(sc,
			    SONIC_NEXTTX(nexttx)) & 0xffff);

			/* Sync the Tx descriptor. */
			SONIC_CDTXSYNC16(sc, nexttx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}

		/* Advance the Tx pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}

	if (sc->sc_txpending == (SONIC_NTXDESC - 1)) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->sc_txdirty = SONIC_NEXTTX(olasttx);

		/*
		 * Stop the SONIC on the last packet we've set up,
		 * and clear end-of-list on the descriptor previous
		 * to our new chain.
		 *
		 * NOTE: our `seg' variable should still be valid!
		 */
		if (sc->sc_32bit) {
			olseg =
			    sonic32toh(sc, sc->sc_tda32[olasttx].tda_fragcnt);
			sc->sc_tda32[sc->sc_txlast].tda_frags[seg].frag_ptr0 |=
			    htosonic32(sc, TDA_LINK_EOL);
			SONIC_CDTXSYNC32(sc, sc->sc_txlast,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			sc->sc_tda32[olasttx].tda_frags[olseg].frag_ptr0 &=
			    htosonic32(sc, ~TDA_LINK_EOL);
			SONIC_CDTXSYNC32(sc, olasttx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		} else {
			olseg =
			    sonic16toh(sc, sc->sc_tda16[olasttx].tda_fragcnt);
			sc->sc_tda16[sc->sc_txlast].tda_frags[seg].frag_ptr0 |=
			    htosonic16(sc, TDA_LINK_EOL);
			SONIC_CDTXSYNC16(sc, sc->sc_txlast,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			sc->sc_tda16[olasttx].tda_frags[olseg].frag_ptr0 &=
			    htosonic16(sc, ~TDA_LINK_EOL);
			SONIC_CDTXSYNC16(sc, olasttx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}

		/* Start the transmitter. */
		CSR_WRITE(sc, SONIC_CR, CR_TXP);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * sonic_watchdog: [ifnet interface function]
 *
 * Watchdog timer handler.
 */
void
sonic_watchdog(struct ifnet *ifp)
{
	struct sonic_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	/* Reinitialize to recover the hardware. */
	(void) sonic_init(ifp);
}

/*
 * sonic_ioctl: [ifnet interface function]
 *
 * Handle control requests from the operator.
 */
int
sonic_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int s, error;

	s = splnet();

	switch (cmd) {
	default:
		/* All requests are handled by the common Ethernet code. */
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			(void) sonic_init(ifp);
			error = 0;
		}
		break;
	}

	splx(s);
	return (error);
}

/*
 * sonic_intr:
 *
 * Interrupt service routine.
 */
int
sonic_intr(void *arg)
{
	struct sonic_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint16_t isr;
	int handled = 0, wantinit;

	/* Loop until no enabled interrupt is pending or a reinit is needed. */
	for (wantinit = 0; wantinit == 0;) {
		isr = CSR_READ(sc, SONIC_ISR) & sc->sc_imr;
		if (isr == 0)
			break;
		CSR_WRITE(sc, SONIC_ISR, isr);	/* ACK */

		handled = 1;

		if (isr & IMR_PRX)
			sonic_rxintr(sc);

		if (isr & (IMR_PTX|IMR_TXER)) {
			if (sonic_txintr(sc) & TCR_FU) {
				printf("%s: transmit FIFO underrun\n",
				    sc->sc_dev.dv_xname);
				wantinit = 1;
			}
		}

		if (isr & (IMR_RFO|IMR_RBA|IMR_RBE|IMR_RDE)) {
#define	PRINTERR(bit, str) \
			if (isr & (bit)) \
				printf("%s: %s\n", sc->sc_dev.dv_xname, str)
			PRINTERR(IMR_RFO, "receive FIFO overrun");
			PRINTERR(IMR_RBA, "receive buffer exceeded");
			PRINTERR(IMR_RBE, "receive buffers exhausted");
			PRINTERR(IMR_RDE, "receive descriptors exhausted");
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			(void) sonic_init(ifp);
		/* Try to push out any packets queued while we serviced. */
		sonic_start(ifp);
	}

	return (handled);
}

/*
 * sonic_txintr:
 *
 * Helper; handle transmit complete interrupts.
 */
uint16_t
sonic_txintr(struct sonic_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sonic_descsoft *ds;
	struct sonic_tda32 *tda32;
	struct sonic_tda16 *tda16;
	uint16_t status, totstat = 0;
	int i;

	/* Descriptors are being retired, so we can accept more work. */
	ifp->if_flags &= ~IFF_OACTIVE;

	for (i = sc->sc_txdirty; sc->sc_txpending != 0;
	     i = SONIC_NEXTTX(i), sc->sc_txpending--) {
		ds = &sc->sc_txsoft[i];

		if (sc->sc_32bit) {
			SONIC_CDTXSYNC32(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			tda32 = &sc->sc_tda32[i];
			status = sonic32toh(sc, tda32->tda_status);
		} else {
			SONIC_CDTXSYNC16(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			tda16 = &sc->sc_tda16[i];
			status = sonic16toh(sc, tda16->tda_status);
		}

		/*
		 * If no status bits other than the programmable-config
		 * bits are set, the chip has not finished this descriptor
		 * yet; stop here.
		 */
		if ((status & ~(TCR_EXDIS|TCR_CRCI|TCR_POWC|TCR_PINT)) == 0)
			break;

		totstat |= status;

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		/*
		 * Check for errors and collisions.
		 */
		if (status & TCR_PTX)
			ifp->if_opackets++;
		else
			ifp->if_oerrors++;
		ifp->if_collisions += TDA_STATUS_NCOL(status);
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txdirty = i;

	/*
	 * Cancel the watchdog timer if there are no pending
	 * transmissions.
	 */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;

	return (totstat);
}

/*
 * sonic_rxintr:
 *
 * Helper; handle receive interrupts.
 */
void
sonic_rxintr(struct sonic_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sonic_descsoft *ds;
	struct sonic_rda32 *rda32;
	struct sonic_rda16 *rda16;
	struct mbuf *m;
	int i, len;
	uint16_t status, bytecount, ptr0, ptr1, seqno;

	for (i = sc->sc_rxptr;; i = SONIC_NEXTRX(i)) {
		ds = &sc->sc_rxsoft[i];

		/*
		 * NOTE(review): ptr0/ptr1/seqno are fetched from the
		 * descriptor but not otherwise used below.
		 */
		if (sc->sc_32bit) {
			SONIC_CDRXSYNC32(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			rda32 = &sc->sc_rda32[i];
			if (rda32->rda_inuse != 0)
				break;
			status = sonic32toh(sc, rda32->rda_status);
			bytecount = sonic32toh(sc, rda32->rda_bytecount);
			ptr0 = sonic32toh(sc, rda32->rda_pkt_ptr0);
			ptr1 = sonic32toh(sc, rda32->rda_pkt_ptr1);
			seqno = sonic32toh(sc, rda32->rda_seqno);
		} else {
			SONIC_CDRXSYNC16(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			rda16 = &sc->sc_rda16[i];
			if (rda16->rda_inuse != 0)
				break;
			status = sonic16toh(sc, rda16->rda_status);
			bytecount = sonic16toh(sc, rda16->rda_bytecount);
			ptr0 = sonic16toh(sc, rda16->rda_pkt_ptr0);
			ptr1 = sonic16toh(sc, rda16->rda_pkt_ptr1);
			seqno = sonic16toh(sc, rda16->rda_seqno);
		}

		/*
		 * Make absolutely sure this is the only packet
		 * in this receive buffer.  Our entire Rx buffer
		 * management scheme depends on this, and if the
		 * SONIC didn't follow our rule, it means we've
		 * misconfigured it.
		 */
		KASSERT(status & RCR_LPKT);

		/*
		 * Make sure the packet arrived OK.  If an error occurred,
		 * update stats and reset the descriptor.  The buffer will
		 * be reused the next time the descriptor comes up in the
		 * ring.
		 */
		if ((status & RCR_PRX) == 0) {
			if (status & RCR_FAER)
				printf("%s: Rx frame alignment error\n",
				    sc->sc_dev.dv_xname);
			else if (status & RCR_CRCR)
				printf("%s: Rx CRC error\n",
				    sc->sc_dev.dv_xname);
			ifp->if_ierrors++;
			SONIC_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * The SONIC includes the CRC with every packet.
		 */
		len = bytecount;

		/*
		 * Ok, if the chip is in 32-bit mode, then receive
		 * buffers must be aligned to 32-bit boundaries,
		 * which means the payload is misaligned.  In this
		 * case, we must allocate a new mbuf, and copy the
		 * packet into it, scooted forward 2 bytes to ensure
		 * proper alignment.
		 *
		 * Note, in 16-bit mode, we can configure the SONIC
		 * to do what we want, and we have.
		 */
#ifndef __NO_STRICT_ALIGNMENT
		if (sc->sc_32bit) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			if (len > (MHLEN - 2)) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0)
					goto dropit;
			}
			m->m_data += 2;
			/*
			 * Note that we use a cluster for incoming frames,
			 * so the buffer is virtually contiguous.
			 */
			memcpy(mtod(m, caddr_t), mtod(ds->ds_mbuf, caddr_t),
			    len);
			SONIC_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
		} else
#endif /* ! __NO_STRICT_ALIGNMENT */
		/*
		 * If the packet is small enough to fit in a single
		 * header mbuf, allocate one and copy the data into
		 * it.  This greatly reduces memory consumption when
		 * we receive lots of small packets.
		 */
		if (sonic_copy_small != 0 && len <= (MHLEN - 2)) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			m->m_data += 2;
			/*
			 * Note that we use a cluster for incoming frames,
			 * so the buffer is virtually contiguous.
			 */
			memcpy(mtod(m, caddr_t), mtod(ds->ds_mbuf, caddr_t),
			    len);
			SONIC_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
		} else {
			/*
			 * Hand the filled buffer up and attach a fresh
			 * one; if that fails, recycle the old buffer
			 * (the packet is dropped).
			 */
			m = ds->ds_mbuf;
			if (sonic_add_rxbuf(sc, i) != 0) {
 dropit:
				ifp->if_ierrors++;
				SONIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}

		ifp->if_ipackets++;
		m->m_flags |= M_HASFCS;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
	CSR_WRITE(sc, SONIC_RWR, SONIC_CDRRADDR(sc, SONIC_PREVRX(i)));
}

/*
 * sonic_reset:
 *
 * Perform a soft reset on the SONIC.
 */
void
sonic_reset(struct sonic_softc *sc)
{

	CSR_WRITE(sc, SONIC_CR, 0);	/* ensure RST is clear */
	CSR_WRITE(sc, SONIC_CR, CR_RST);
	delay(1000);
	CSR_WRITE(sc, SONIC_CR, 0);
	delay(1000);
}

/*
 * sonic_init: [ifnet interface function]
 *
 * Initialize the interface.  Must be called at splnet().
 */
int
sonic_init(struct ifnet *ifp)
{
	struct sonic_softc *sc = ifp->if_softc;
	struct sonic_descsoft *ds;
	int i, error = 0;
	uint16_t reg;

	/*
	 * Cancel any pending I/O.
	 */
	sonic_stop(ifp, 0);

	/*
	 * Reset the SONIC to a known state.
	 */
	sonic_reset(sc);

	/*
	 * Bring the SONIC into reset state, and program the DCR.
	 *
	 * Note: We don't bother optimizing the transmit and receive
	 * thresholds, here.  We just use the most conservative values:
	 *
	 *	- Rx: 4 bytes (RFT0,RFT1 == 0,0)
	 *	- Tx: 28 bytes (TFT0,TFT1 == 1,1)
	 */
	reg = sc->sc_dcr | DCR_TFT0 | DCR_TFT1;
	if (sc->sc_32bit)
		reg |= DCR_DW;
	CSR_WRITE(sc, SONIC_CR, CR_RST);
	CSR_WRITE(sc, SONIC_DCR, reg);
	CSR_WRITE(sc, SONIC_DCR2, sc->sc_dcr2);
	CSR_WRITE(sc, SONIC_CR, 0);

	/*
	 * Initialize the transmit descriptors.
	 */
	if (sc->sc_32bit) {
		for (i = 0; i < SONIC_NTXDESC; i++) {
			memset(&sc->sc_tda32[i], 0, sizeof(struct sonic_tda32));
			SONIC_CDTXSYNC32(sc, i,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}
	} else {
		for (i = 0; i < SONIC_NTXDESC; i++) {
			memset(&sc->sc_tda16[i], 0, sizeof(struct sonic_tda16));
			SONIC_CDTXSYNC16(sc, i,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}
	}
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = SONIC_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < SONIC_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf == NULL) {
			if ((error = sonic_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map Rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				sonic_rxdrain(sc);
				goto out;
			}
		} else
			SONIC_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/* Give the transmit ring to the SONIC. */
	CSR_WRITE(sc, SONIC_UTDAR, (SONIC_CDTXADDR(sc, 0) >> 16) & 0xffff);
	CSR_WRITE(sc, SONIC_CTDAR, SONIC_CDTXADDR(sc, 0) & 0xffff);

	/* Give the receive descriptor ring to the SONIC. */
	CSR_WRITE(sc, SONIC_URDAR, (SONIC_CDRXADDR(sc, 0) >> 16) & 0xffff);
	CSR_WRITE(sc, SONIC_CRDAR, SONIC_CDRXADDR(sc, 0) & 0xffff);

	/* Give the receive buffer ring to the SONIC. */
	CSR_WRITE(sc, SONIC_URRAR, (SONIC_CDRRADDR(sc, 0) >> 16) & 0xffff);
	CSR_WRITE(sc, SONIC_RSAR, SONIC_CDRRADDR(sc, 0) & 0xffff);
	if (sc->sc_32bit)
		CSR_WRITE(sc, SONIC_REAR,
		    (SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1) +
		    sizeof(struct sonic_rra32)) & 0xffff);
	else
		CSR_WRITE(sc, SONIC_REAR,
		    (SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1) +
		    sizeof(struct sonic_rra16)) & 0xffff);
	CSR_WRITE(sc, SONIC_RRR, SONIC_CDRRADDR(sc, 0) & 0xffff);
	CSR_WRITE(sc, SONIC_RWR, SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1));

	/*
	 * Set the End-Of-Buffer counter such that only one packet
	 * will be placed into each buffer we provide.  Note we are
	 * following the recommendation of section 3.4.4 of the manual
	 * here, and have "lengthened" the receive buffers accordingly.
	 */
	if (sc->sc_32bit)
		CSR_WRITE(sc, SONIC_EOBC, (ETHER_MAX_LEN + 2) / 2);
	else
		CSR_WRITE(sc, SONIC_EOBC, (ETHER_MAX_LEN / 2));

	/* Reset the receive sequence counter. */
	CSR_WRITE(sc, SONIC_RSC, 0);

	/* Clear the tally registers (they count down from 0xffff). */
	CSR_WRITE(sc, SONIC_CRCETC, 0xffff);
	CSR_WRITE(sc, SONIC_FAET, 0xffff);
	CSR_WRITE(sc, SONIC_MPT, 0xffff);

	/* Set the receive filter. */
	sonic_set_filter(sc);

	/*
	 * Set the interrupt mask register.
	 */
	sc->sc_imr = IMR_RFO | IMR_RBA | IMR_RBE | IMR_RDE |
	    IMR_TXER | IMR_PTX | IMR_PRX;
	CSR_WRITE(sc, SONIC_IMR, sc->sc_imr);

	/*
	 * Start the receive process in motion.  Note, we don't
	 * start the transmit process until we actually try to
	 * transmit packets.
	 */
	CSR_WRITE(sc, SONIC_CR, CR_RXEN | CR_RRRA);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}

/*
 * sonic_rxdrain:
 *
 * Drain the receive queue.
 */
void
sonic_rxdrain(struct sonic_softc *sc)
{
	struct sonic_descsoft *ds;
	int i;

	for (i = 0; i < SONIC_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}

/*
 * sonic_stop: [ifnet interface function]
 *
 * Stop transmission on the interface.
 */
void
sonic_stop(struct ifnet *ifp, int disable)
{
	struct sonic_softc *sc = ifp->if_softc;
	struct sonic_descsoft *ds;
	int i;

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE(sc, SONIC_IMR, 0);

	/*
	 * Stop the transmitter, receiver, and timer.
	 */
	CSR_WRITE(sc, SONIC_CR, CR_HTX|CR_RXDIS|CR_STP);
	for (i = 0; i < 1000; i++) {
		if ((CSR_READ(sc, SONIC_CR) & (CR_TXP|CR_RXEN|CR_ST)) == 0)
			break;
		delay(2);
	}
	if ((CSR_READ(sc, SONIC_CR) & (CR_TXP|CR_RXEN|CR_ST)) != 0)
		printf("%s: SONIC failed to stop\n", sc->sc_dev.dv_xname);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < SONIC_NTXDESC; i++) {
		ds = &sc->sc_txsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	if (disable)
		sonic_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/*
 * sonic_add_rxbuf:
 *
 * Add a receive buffer to the indicated descriptor.
 */
int
sonic_add_rxbuf(struct sonic_softc *sc, int idx)
{
	struct sonic_descsoft *ds = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/* Unload any previous buffer before replacing it. */
	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("sonic_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	SONIC_INIT_RXDESC(sc, idx);

	return (0);
}

/*
 * sonic_set_camentry:
 *
 * Program one CAM (Content Addressable Memory) slot with the
 * given Ethernet address, in the SONIC's 16-bits-per-word layout.
 */
static void
sonic_set_camentry(struct sonic_softc *sc, int entry, const uint8_t *enaddr)
{

	if (sc->sc_32bit) {
		struct sonic_cda32 *cda = &sc->sc_cda32[entry];

		cda->cda_entry = htosonic32(sc, entry);
		cda->cda_addr0 = htosonic32(sc, enaddr[0] | (enaddr[1] << 8));
		cda->cda_addr1 = htosonic32(sc, enaddr[2] | (enaddr[3] << 8));
		cda->cda_addr2 = htosonic32(sc, enaddr[4] | (enaddr[5] << 8));
	} else {
		struct sonic_cda16 *cda = &sc->sc_cda16[entry];

		cda->cda_entry = htosonic16(sc, entry);
		cda->cda_addr0 = htosonic16(sc, enaddr[0] | (enaddr[1] << 8));
		cda->cda_addr1 = htosonic16(sc, enaddr[2] | (enaddr[3] << 8));
		cda->cda_addr2 = htosonic16(sc, enaddr[4] | (enaddr[5] << 8));
	}
}

/*
 * sonic_set_filter:
 *
 * Set the SONIC receive filter.
1166 */ 1167void 1168sonic_set_filter(struct sonic_softc *sc) 1169{ 1170 struct ethercom *ec = &sc->sc_ethercom; 1171 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1172 struct ether_multi *enm; 1173 struct ether_multistep step; 1174 int i, entry = 0; 1175 uint16_t camvalid = 0; 1176 uint16_t rcr = 0; 1177 1178 if (ifp->if_flags & IFF_BROADCAST) 1179 rcr |= RCR_BRD; 1180 1181 if (ifp->if_flags & IFF_PROMISC) { 1182 rcr |= RCR_PRO; 1183 goto allmulti; 1184 } 1185 1186 /* Put our station address in the first CAM slot. */ 1187 sonic_set_camentry(sc, entry, LLADDR(ifp->if_sadl)); 1188 camvalid |= (1U << entry); 1189 entry++; 1190 1191 /* Add the multicast addresses to the CAM. */ 1192 ETHER_FIRST_MULTI(step, ec, enm); 1193 while (enm != NULL) { 1194 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1195 /* 1196 * We must listen to a range of multicast addresses. 1197 * The only way to do this on the SONIC is to enable 1198 * reception of all multicast packets. 1199 */ 1200 goto allmulti; 1201 } 1202 1203 if (entry == 16) { 1204 /* 1205 * Out of CAM slots. Have to enable reception 1206 * of all multicast addresses. 1207 */ 1208 goto allmulti; 1209 } 1210 1211 sonic_set_camentry(sc, entry, enm->enm_addrlo); 1212 camvalid |= (1U << entry); 1213 entry++; 1214 1215 ETHER_NEXT_MULTI(step, enm); 1216 } 1217 1218 ifp->if_flags &= ~IFF_ALLMULTI; 1219 goto setit; 1220 1221 allmulti: 1222 /* Use only the first CAM slot (station address). */ 1223 camvalid = 0x0001; 1224 entry = 1; 1225 rcr |= RCR_AMC; 1226 1227 setit: 1228 /* Load the CAM. 
*/ 1229 SONIC_CDCAMSYNC(sc, BUS_DMASYNC_PREWRITE); 1230 CSR_WRITE(sc, SONIC_CDP, SONIC_CDCAMADDR(sc) & 0xffff); 1231 CSR_WRITE(sc, SONIC_CDC, entry); 1232 CSR_WRITE(sc, SONIC_CR, CR_LCAM); 1233 for (i = 0; i < 10000; i++) { 1234 if ((CSR_READ(sc, SONIC_CR) & CR_LCAM) == 0) 1235 break; 1236 delay(2); 1237 } 1238 if (CSR_READ(sc, SONIC_CR) & CR_LCAM) 1239 printf("%s: CAM load failed\n", sc->sc_dev.dv_xname); 1240 SONIC_CDCAMSYNC(sc, BUS_DMASYNC_POSTWRITE); 1241 1242 /* Set the CAM enable resgiter. */ 1243 CSR_WRITE(sc, SONIC_CER, camvalid); 1244 1245 /* Set the receive control register. */ 1246 CSR_WRITE(sc, SONIC_RCR, rcr); 1247} 1248