if_sq.c revision 1.11
1/* $NetBSD: if_sq.c,v 1.11 2002/05/02 20:31:19 rafal Exp $ */ 2 3/* 4 * Copyright (c) 2001 Rafal K. Boni 5 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc. 6 * All rights reserved. 7 * 8 * Portions of this code are derived from software contributed to The 9 * NetBSD Foundation by Jason R. Thorpe of the Numerical Aerospace 10 * Simulation Facility, NASA Ames Research Center. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. The name of the author may not be used to endorse or promote products 21 * derived from this software without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
33 */ 34 35#include "bpfilter.h" 36 37#include <sys/param.h> 38#include <sys/systm.h> 39#include <sys/device.h> 40#include <sys/callout.h> 41#include <sys/mbuf.h> 42#include <sys/malloc.h> 43#include <sys/kernel.h> 44#include <sys/socket.h> 45#include <sys/ioctl.h> 46#include <sys/errno.h> 47#include <sys/syslog.h> 48 49#include <uvm/uvm_extern.h> 50 51#include <machine/endian.h> 52 53#include <net/if.h> 54#include <net/if_dl.h> 55#include <net/if_media.h> 56#include <net/if_ether.h> 57 58#if NBPFILTER > 0 59#include <net/bpf.h> 60#endif 61 62#include <machine/bus.h> 63#include <machine/intr.h> 64 65#include <dev/ic/seeq8003reg.h> 66 67#include <sgimips/hpc/sqvar.h> 68#include <sgimips/hpc/hpcvar.h> 69#include <sgimips/hpc/hpcreg.h> 70 71#include <dev/arcbios/arcbios.h> 72#include <dev/arcbios/arcbiosvar.h> 73 74#define static 75 76/* 77 * Short TODO list: 78 * (1) Do counters for bad-RX packets. 79 * (2) Allow multi-segment transmits, instead of copying to a single, 80 * contiguous mbuf. 81 * (3) Verify sq_stop() turns off enough stuff; I was still getting 82 * seeq interrupts after sq_stop(). 83 * (4) Fix up printfs in driver (most should only fire ifdef SQ_DEBUG 84 * or something similar. 85 * (5) Implement EDLC modes: especially packet auto-pad and simplex 86 * mode. 87 * (6) Should the driver filter out its own transmissions in non-EDLC 88 * mode? 89 * (7) Multicast support -- multicast filter, address management, ... 90 * (8) Deal with RB0 (recv buffer overflow) on reception. Will need 91 * to figure out if RB0 is read-only as stated in one spot in the 92 * HPC spec or read-write (ie, is the 'write a one to clear it') 93 * the correct thing? 
94 */ 95 96static int sq_match(struct device *, struct cfdata *, void *); 97static void sq_attach(struct device *, struct device *, void *); 98static int sq_init(struct ifnet *); 99static void sq_start(struct ifnet *); 100static void sq_stop(struct ifnet *, int); 101static void sq_watchdog(struct ifnet *); 102static int sq_ioctl(struct ifnet *, u_long, caddr_t); 103 104static void sq_set_filter(struct sq_softc *); 105static int sq_intr(void *); 106static int sq_rxintr(struct sq_softc *); 107static int sq_txintr(struct sq_softc *); 108static void sq_reset(struct sq_softc *); 109static int sq_add_rxbuf(struct sq_softc *, int); 110static void sq_dump_buffer(u_int32_t addr, u_int32_t len); 111 112static void enaddr_aton(const char*, u_int8_t*); 113 114/* Actions */ 115#define SQ_RESET 1 116#define SQ_ADD_TO_DMA 2 117#define SQ_START_DMA 3 118#define SQ_DONE_DMA 4 119#define SQ_RESTART_DMA 5 120#define SQ_TXINTR_ENTER 6 121#define SQ_TXINTR_EXIT 7 122#define SQ_TXINTR_BUSY 8 123 124struct sq_action_trace { 125 int action; 126 int bufno; 127 int status; 128 int freebuf; 129}; 130 131#define SQ_TRACEBUF_SIZE 100 132int sq_trace_idx = 0; 133struct sq_action_trace sq_trace[SQ_TRACEBUF_SIZE]; 134 135void sq_trace_dump(struct sq_softc* sc); 136 137#define SQ_TRACE(act, buf, stat, free) do { \ 138 sq_trace[sq_trace_idx].action = (act); \ 139 sq_trace[sq_trace_idx].bufno = (buf); \ 140 sq_trace[sq_trace_idx].status = (stat); \ 141 sq_trace[sq_trace_idx].freebuf = (free); \ 142 if (++sq_trace_idx == SQ_TRACEBUF_SIZE) { \ 143 memset(&sq_trace, 0, sizeof(sq_trace)); \ 144 sq_trace_idx = 0; \ 145 } \ 146} while (0) 147 148struct cfattach sq_ca = { 149 sizeof(struct sq_softc), sq_match, sq_attach 150}; 151 152static int 153sq_match(struct device *parent, struct cfdata *cf, void *aux) 154{ 155 struct hpc_attach_args *ha = aux; 156 157 if (strcmp(ha->ha_name, cf->cf_driver->cd_name) == 0) 158 return (1); 159 160 return (0); 161} 162 163static void 164sq_attach(struct device *parent, 
struct device *self, void *aux) 165{ 166 int i, err; 167 char* macaddr; 168 struct sq_softc *sc = (void *)self; 169 struct hpc_attach_args *haa = aux; 170 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 171 172 sc->sc_hpct = haa->ha_st; 173 if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh, 174 haa->ha_dmaoff, 175 HPC_ENET_REGS_SIZE, 176 &sc->sc_hpch)) != 0) { 177 printf(": unable to map HPC DMA registers, error = %d\n", err); 178 goto fail_0; 179 } 180 181 sc->sc_regt = haa->ha_st; 182 if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh, 183 haa->ha_devoff, 184 HPC_ENET_DEVREGS_SIZE, 185 &sc->sc_regh)) != 0) { 186 printf(": unable to map Seeq registers, error = %d\n", err); 187 goto fail_0; 188 } 189 190 sc->sc_dmat = haa->ha_dmat; 191 192 if ((err = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sq_control), 193 PAGE_SIZE, PAGE_SIZE, &sc->sc_cdseg, 194 1, &sc->sc_ncdseg, BUS_DMA_NOWAIT)) != 0) { 195 printf(": unable to allocate control data, error = %d\n", err); 196 goto fail_0; 197 } 198 199 if ((err = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg, 200 sizeof(struct sq_control), 201 (caddr_t *)&sc->sc_control, 202 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) { 203 printf(": unable to map control data, error = %d\n", err); 204 goto fail_1; 205 } 206 207 if ((err = bus_dmamap_create(sc->sc_dmat, sizeof(struct sq_control), 208 1, sizeof(struct sq_control), PAGE_SIZE, 209 BUS_DMA_NOWAIT, &sc->sc_cdmap)) != 0) { 210 printf(": unable to create DMA map for control data, error " 211 "= %d\n", err); 212 goto fail_2; 213 } 214 215 if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap, sc->sc_control, 216 sizeof(struct sq_control), 217 NULL, BUS_DMA_NOWAIT)) != 0) { 218 printf(": unable to load DMA map for control data, error " 219 "= %d\n", err); 220 goto fail_3; 221 } 222 223 memset(sc->sc_control, 0, sizeof(struct sq_control)); 224 225 /* Create transmit buffer DMA maps */ 226 for (i = 0; i < SQ_NTXDESC; i++) { 227 if ((err = bus_dmamap_create(sc->sc_dmat, 
MCLBYTES, 1, MCLBYTES, 228 0, BUS_DMA_NOWAIT, 229 &sc->sc_txmap[i])) != 0) { 230 printf(": unable to create tx DMA map %d, error = %d\n", 231 i, err); 232 goto fail_4; 233 } 234 } 235 236 /* Create transmit buffer DMA maps */ 237 for (i = 0; i < SQ_NRXDESC; i++) { 238 if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 239 0, BUS_DMA_NOWAIT, 240 &sc->sc_rxmap[i])) != 0) { 241 printf(": unable to create rx DMA map %d, error = %d\n", 242 i, err); 243 goto fail_5; 244 } 245 } 246 247 /* Pre-allocate the receive buffers. */ 248 for (i = 0; i < SQ_NRXDESC; i++) { 249 if ((err = sq_add_rxbuf(sc, i)) != 0) { 250 printf(": unable to allocate or map rx buffer %d\n," 251 " error = %d\n", i, err); 252 goto fail_6; 253 } 254 } 255 256 if ((macaddr = ARCBIOS->GetEnvironmentVariable("eaddr")) == NULL) { 257 printf(": unable to get MAC address!\n"); 258 goto fail_6; 259 } 260 261 evcnt_attach_dynamic(&sc->sq_intrcnt, EVCNT_TYPE_INTR, NULL, 262 self->dv_xname, "intr"); 263 264 if ((cpu_intr_establish(haa->ha_irq, IPL_NET, sq_intr, sc)) == NULL) { 265 printf(": unable to establish interrupt!\n"); 266 goto fail_6; 267 } 268 269 /* Reset the chip to a known state. */ 270 sq_reset(sc); 271 272 /* 273 * Determine if we're an 8003 or 80c03 by setting the first 274 * MAC address register to non-zero, and then reading it back. 275 * If it's zero, we have an 80c03, because we will have read 276 * the TxCollLSB register. 277 */ 278 bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0, 0xa5); 279 if (bus_space_read_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0) == 0) 280 sc->sc_type = SQ_TYPE_80C03; 281 else 282 sc->sc_type = SQ_TYPE_8003; 283 bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0, 0x00); 284 285 printf(": SGI Seeq %s\n", 286 sc->sc_type == SQ_TYPE_80C03 ? 
"80c03" : "8003"); 287 288 enaddr_aton(macaddr, sc->sc_enaddr); 289 290 printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname, 291 ether_sprintf(sc->sc_enaddr)); 292 293 strcpy(ifp->if_xname, sc->sc_dev.dv_xname); 294 ifp->if_softc = sc; 295 ifp->if_mtu = ETHERMTU; 296 ifp->if_init = sq_init; 297 ifp->if_stop = sq_stop; 298 ifp->if_start = sq_start; 299 ifp->if_ioctl = sq_ioctl; 300 ifp->if_watchdog = sq_watchdog; 301 ifp->if_flags = IFF_BROADCAST | IFF_NOTRAILERS | IFF_MULTICAST; 302 IFQ_SET_READY(&ifp->if_snd); 303 304 if_attach(ifp); 305 ether_ifattach(ifp, sc->sc_enaddr); 306 307 memset(&sq_trace, 0, sizeof(sq_trace)); 308 /* Done! */ 309 return; 310 311 /* 312 * Free any resources we've allocated during the failed attach 313 * attempt. Do this in reverse order and fall through. 314 */ 315fail_6: 316 for (i = 0; i < SQ_NRXDESC; i++) { 317 if (sc->sc_rxmbuf[i] != NULL) { 318 bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]); 319 m_freem(sc->sc_rxmbuf[i]); 320 } 321 } 322fail_5: 323 for (i = 0; i < SQ_NRXDESC; i++) { 324 if (sc->sc_rxmap[i] != NULL) 325 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]); 326 } 327fail_4: 328 for (i = 0; i < SQ_NTXDESC; i++) { 329 if (sc->sc_txmap[i] != NULL) 330 bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]); 331 } 332 bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap); 333fail_3: 334 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap); 335fail_2: 336 bus_dmamem_unmap(sc->sc_dmat, (caddr_t) sc->sc_control, 337 sizeof(struct sq_control)); 338fail_1: 339 bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg); 340fail_0: 341 return; 342} 343 344/* Set up data to get the interface up and running. 
 */
int
sq_init(struct ifnet *ifp)
{
	int i;
	u_int32_t reg;
	struct sq_softc *sc = ifp->if_softc;

	/* Cancel any in-progress I/O */
	sq_stop(ifp, 0);

	/* Reset ring bookkeeping: rx starts at 0, all tx descriptors free. */
	sc->sc_nextrx = 0;

	sc->sc_nfreetx = SQ_NTXDESC;
	sc->sc_nexttx = sc->sc_prevtx = 0;

	SQ_TRACE(SQ_RESET, 0, 0, sc->sc_nfreetx);

	/* Set into 8003 mode, bank 0 to program ethernet address */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD, TXCMD_BANK0);

	/* Now write the address (one byte per register, registers 0..5) */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		bus_space_write_1(sc->sc_regt, sc->sc_regh, i,
		    sc->sc_enaddr[i]);

	/* Prototype receive command: interrupt on all notable rx events. */
	sc->sc_rxcmd = RXCMD_IE_CRC |
		       RXCMD_IE_DRIB |
		       RXCMD_IE_SHORT |
		       RXCMD_IE_END |
		       RXCMD_IE_GOOD;

	/*
	 * Set the receive filter -- this will add some bits to the
	 * prototype RXCMD register.  Do this before setting the
	 * transmit config register, since we might need to switch
	 * banks.
	 */
	sq_set_filter(sc);

	/* Set up Seeq transmit command register */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD,
			  TXCMD_IE_UFLOW |
			  TXCMD_IE_COLL |
			  TXCMD_IE_16COLL |
			  TXCMD_IE_GOOD);

	/* Now write the receive command register. */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_RXCMD, sc->sc_rxcmd);

	/* Set up HPC ethernet DMA config (read-modify-write to keep
	 * whatever other bits the PROM/firmware left set). */
	reg = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_DMACFG);
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_DMACFG,
			  reg | ENETR_DMACFG_FIX_RXDC |
				ENETR_DMACFG_FIX_INTR |
				ENETR_DMACFG_FIX_EOP);

	/* Pass the start of the receive ring to the HPC */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_NDBP,
			  SQ_CDRXADDR(sc, 0));

	/* And turn on the HPC ethernet receive channel */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL,
			  ENETR_CTL_ACTIVE);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

/*
 * Program the receive filter into the prototype sc_rxcmd (caller writes
 * it to the chip).  The 8003 has no multicast hash, so any multicast
 * membership degrades to all-multicast.
 */
static void
sq_set_filter(struct sq_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;

	/*
	 * Check for promiscuous mode.  Also implies
	 * all-multicast.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rxcmd |= RXCMD_REC_ALL;
		ifp->if_flags |= IFF_ALLMULTI;
		return;
	}

	/*
	 * The 8003 has no hash table.  If we have any multicast
	 * addresses on the list, enable reception of all multicast
	 * frames.
	 *
	 * XXX The 80c03 has a hash table.  We should use it.
	 */

	ETHER_FIRST_MULTI(step, ec, enm);

	if (enm == NULL) {
		/* No multicast memberships: broadcast + unicast only. */
		sc->sc_rxcmd &= ~RXCMD_REC_MASK;
		sc->sc_rxcmd |= RXCMD_REC_BROAD;

		ifp->if_flags &= ~IFF_ALLMULTI;
		return;
	}

	sc->sc_rxcmd |= RXCMD_REC_MULTI;
	ifp->if_flags |= IFF_ALLMULTI;
}

/*
 * ioctl handler: defers to ether_ioctl(); on ENETRESET (multicast list
 * changed) reinitializes the interface so the new filter takes effect.
 */
int
sq_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int s, error = 0;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		error = sq_init(ifp);
	}

	splx(s);
	return (error);
}

/*
 * if_start: drain the send queue into the transmit descriptor ring.
 * Packets are coalesced into a single contiguous mbuf when the initial
 * DMA load fails (see TODO (2) at the top of the file).  Interacts with
 * sq_txintr() to (re)start the HPC transmit DMA channel.
 */
void
sq_start(struct ifnet *ifp)
{
	struct sq_softc *sc = ifp->if_softc;
	u_int32_t status;
	struct mbuf *m0, *m;
	bus_dmamap_t dmamap;
	int err, totlen, nexttx, firsttx, lasttx, ofree, seg;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_nfreetx;
	firsttx = sc->sc_nexttx;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 *
	 * NOTE(review): lasttx is assigned only inside the descriptor
	 * loop below; it is valid after the while loop exactly when
	 * sc_nfreetx != ofree, which is the only place it is then used.
	 */
	while (sc->sc_nfreetx != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = sc->sc_txmap[sc->sc_nexttx];

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the alloted number of segments, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
						      BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}

			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;

			if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
						m, BUS_DMA_NOWAIT)) != 0) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, err);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_nfreetx) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			/* Transmit the contiguous copy instead. */
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_nexttx, seg = 0, totlen = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = SQ_NEXTTX(nexttx)) {
			sc->sc_txdesc[nexttx].hdd_bufptr =
					    dmamap->dm_segs[seg].ds_addr;
			sc->sc_txdesc[nexttx].hdd_ctl =
					    dmamap->dm_segs[seg].ds_len;
			sc->sc_txdesc[nexttx].hdd_descptr=
					    SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx));
			lasttx = nexttx;
			totlen += dmamap->dm_segs[seg].ds_len;
		}

		/* Last descriptor gets end-of-packet */
		sc->sc_txdesc[lasttx].hdd_ctl |= HDD_CTL_EOPACKET;

		/* XXXrkb: if not EDLC, pad to min len manually */
		if (totlen < ETHER_MIN_LEN) {
		    sc->sc_txdesc[lasttx].hdd_ctl += (ETHER_MIN_LEN - totlen);
		    totlen = ETHER_MIN_LEN;
		}

#if 0
		printf("%s: transmit %d-%d, len %d\n", sc->sc_dev.dv_xname,
						       sc->sc_nexttx, lasttx,
						       totlen);
#endif

		if (ifp->if_flags & IFF_DEBUG) {
			printf("     transmit chain:\n");
			for (seg = sc->sc_nexttx;; seg = SQ_NEXTTX(seg)) {
				printf("     descriptor %d:\n", seg);
				printf("       hdd_bufptr:      0x%08x\n",
					sc->sc_txdesc[seg].hdd_bufptr);
				printf("       hdd_ctl: 0x%08x\n",
					sc->sc_txdesc[seg].hdd_ctl);
				printf("       hdd_descptr:      0x%08x\n",
					sc->sc_txdesc[seg].hdd_descptr);

				if (seg == lasttx)
					break;
			}
		}

		/* Sync the descriptors we're using. */
		SQ_CDTXSYNC(sc, sc->sc_nexttx, dmamap->dm_nsegs,
				BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Store a pointer to the packet so we can free it later */
		sc->sc_txmbuf[sc->sc_nexttx] = m0;

		/* Advance the tx pointer. */
		sc->sc_nfreetx -= dmamap->dm_nsegs;
		sc->sc_nexttx = nexttx;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	/* All transmit descriptors used up, let upper layers know */
	if (sc->sc_nfreetx == 0)
		ifp->if_flags |= IFF_OACTIVE;

	if (sc->sc_nfreetx != ofree) {
#if 0
		printf("%s: %d packets enqueued, first %d, INTR on %d\n",
			    sc->sc_dev.dv_xname, lasttx - firsttx + 1,
			    firsttx, lasttx);
#endif

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued, mark it as the last
		 * descriptor.
		 */
		sc->sc_txdesc[lasttx].hdd_ctl |= (HDD_CTL_INTR |
						  HDD_CTL_EOCHAIN);
		SQ_CDTXSYNC(sc, lasttx, 1,
				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * There is a potential race condition here if the HPC
		 * DMA channel is active and we try and either update
		 * the 'next descriptor' pointer in the HPC PIO space
		 * or the 'next descriptor' pointer in a previous desc-
		 * riptor.
		 *
		 * To avoid this, if the channel is active, we rely on
		 * the transmit interrupt routine noticing that there
		 * are more packets to send and restarting the HPC DMA
		 * engine, rather than mucking with the DMA state here.
		 */
		status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
							HPC_ENETX_CTL);

		if ((status & ENETX_CTL_ACTIVE) != 0) {
			/* Channel busy: just splice our chain onto the end. */
			SQ_TRACE(SQ_ADD_TO_DMA, firsttx, status,
			    sc->sc_nfreetx);
			sc->sc_txdesc[SQ_PREVTX(firsttx)].hdd_ctl &=
			    ~HDD_CTL_EOCHAIN;
			SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx), 1,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else {
			SQ_TRACE(SQ_START_DMA, firsttx, status, sc->sc_nfreetx);

			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
			    HPC_ENETX_NDBP, SQ_CDTXADDR(sc, firsttx));

			/* Kick DMA channel into life */
			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
			    HPC_ENETX_CTL, ENETX_CTL_ACTIVE);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * if_stop: free all pending transmit mbufs, clear the Seeq command
 * registers, and reset the chip.  See TODO (3): this may not quiesce
 * the chip completely.
 */
void
sq_stop(struct ifnet *ifp, int disable)
{
	int i;
	struct sq_softc *sc = ifp->if_softc;

	for (i =0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = NULL;
		}
	}

	/* Clear Seeq transmit/receive command registers */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD, 0);
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_RXCMD, 0);

	sq_reset(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/* Device timeout/watchdog routine.
 */
void
sq_watchdog(struct ifnet *ifp)
{
	u_int32_t status;
	struct sq_softc *sc = ifp->if_softc;

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETX_CTL);
	log(LOG_ERR, "%s: device timeout (prev %d, next %d, free %d, "
		     "status %08x)\n", sc->sc_dev.dv_xname, sc->sc_prevtx,
		     sc->sc_nexttx, sc->sc_nfreetx, status);

	/* Dump and clear the debug trace ring, count the error, restart. */
	sq_trace_dump(sc);

	memset(&sq_trace, 0, sizeof(sq_trace));
	sq_trace_idx = 0;

	++ifp->if_oerrors;

	sq_init(ifp);
}

/* Print every recorded entry of the debug trace ring to the console. */
void sq_trace_dump(struct sq_softc* sc)
{
	int i;

	for(i = 0; i < sq_trace_idx; i++) {
		printf("%s: [%d] action %d, buf %d, free %d, status %08x\n",
			sc->sc_dev.dv_xname, i, sq_trace[i].action,
			sq_trace[i].bufno, sq_trace[i].freebuf,
			sq_trace[i].status);
	}
}

/*
 * Interrupt handler: acknowledge the HPC interrupt, then dispatch to
 * rx and (if transmits are outstanding) tx processing.
 */
static int
sq_intr(void * arg)
{
	struct sq_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int handled = 0;
	u_int32_t stat;

	/*
	 * NOTE(review): bit 2 of HPC_ENETR_RESET appears to be the
	 * interrupt-pending/ack bit here (read to test, write-1 to
	 * clear); magic number not named in the visible headers --
	 * confirm against the HPC spec.
	 */
	stat = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_RESET);

	if ((stat & 2) == 0) {
		printf("%s: Unexpected interrupt!\n", sc->sc_dev.dv_xname);
		return 0;
	}

	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_RESET, 2);

	/*
	 * If the interface isn't running, the interrupt couldn't
	 * possibly have come from us.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	sc->sq_intrcnt.ev_count++;

	/* Always check for received packets */
	if (sq_rxintr(sc) != 0)
		handled++;

	/* Only handle transmit interrupts if we actually sent something */
	if (sc->sc_nfreetx < SQ_NTXDESC) {
		sq_txintr(sc);
		handled++;
	}

#if NRND > 0
	if (handled)
		rnd_add_uint32(&sc->rnd_source, stat);
#endif
	return (handled);
}

/*
 * Receive interrupt: walk the rx ring from sc_nextrx until we hit a
 * CPU-owned descriptor, handing good frames up the stack and recycling
 * bad ones in place.  Afterwards, move the ring's EOCHAIN marker and
 * restart the HPC receive channel if it has stopped.
 */
static int
sq_rxintr(struct sq_softc *sc)
{
	int count = 0;
	struct mbuf* m;
	int i, framelen;
	u_int8_t pktstat;
	u_int32_t status;
	int new_end, orig_end;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	for(i = sc->sc_nextrx;; i = SQ_NEXTRX(i)) {
		SQ_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* If this is a CPU-owned buffer, we're at the end of the list */
		if (sc->sc_rxdesc[i].hdd_ctl & HDD_CTL_OWN) {
#if 0
			u_int32_t reg;

			reg = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
					       HPC_ENETR_CTL);
			printf("%s: rxintr: done at %d (ctl %08x)\n",
			       sc->sc_dev.dv_xname, i, reg);
#endif
			break;
		}

		count++;

		m = sc->sc_rxmbuf[i];
		/*
		 * NOTE(review): frame length is derived from the residual
		 * byte count in hdd_ctl; the "- 3" presumably strips
		 * trailing status bytes -- confirm against the HPC spec.
		 */
		framelen = m->m_ext.ext_size -
				HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hdd_ctl) - 3;

		/* Now sync the actual packet data */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
		    sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/* Per-packet status byte stored just past the frame data. */
		pktstat = *((u_int8_t*)m->m_data + framelen + 2);

		if ((pktstat & RXSTAT_GOOD) == 0) {
			ifp->if_ierrors++;

			if (pktstat & RXSTAT_OFLOW)
				printf("%s: receive FIFO overflow\n",
				    sc->sc_dev.dv_xname);

			/* Recycle the buffer in place for the hardware. */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			continue;
		}

		if (sq_add_rxbuf(sc, i) != 0) {
			/* No replacement mbuf: drop the frame, reuse buffer. */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			continue;
		}


		/* Skip the 2-byte alignment pad at the front of the buffer. */
		m->m_data += 2;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = framelen;

		ifp->if_ipackets++;

#if 0
		printf("%s: sq_rxintr: buf %d len %d\n", sc->sc_dev.dv_xname,
							     i, framelen);
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		(*ifp->if_input)(ifp, m);
	}


	/* If anything happened, move ring start/end pointers to new spot */
	if (i != sc->sc_nextrx) {
		new_end = SQ_PREVRX(i);
		sc->sc_rxdesc[new_end].hdd_ctl |= HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, new_end, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);

		orig_end = SQ_PREVRX(sc->sc_nextrx);
		sc->sc_rxdesc[orig_end].hdd_ctl &= ~HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, orig_end, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);

		sc->sc_nextrx = i;
	}

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL);

	/* If receive channel is stopped, restart it... */
	if ((status & ENETR_CTL_ACTIVE) == 0) {
		/* Pass the start of the receive ring to the HPC */
		bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
		    HPC_ENETR_NDBP, SQ_CDRXADDR(sc, sc->sc_nextrx));

		/* And turn on the HPC ethernet receive channel */
		bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL,
		    ENETR_CTL_ACTIVE);
	}

	return count;
}

/*
 * Transmit-complete interrupt: record error statistics, then reclaim
 * finished descriptors starting at sc_prevtx.  If a descriptor is not
 * yet done but the DMA channel has gone idle, restart the channel at
 * that descriptor (the race documented in sq_start()).
 */
static int
sq_txintr(struct sq_softc *sc)
{
	int i;
	u_int32_t status;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETX_CTL);

	SQ_TRACE(SQ_TXINTR_ENTER, sc->sc_prevtx, status, sc->sc_nfreetx);

	if ((status & (ENETX_CTL_ACTIVE | TXSTAT_GOOD)) == 0) {
		if (status & TXSTAT_COLL)
			ifp->if_collisions++;

		if (status & TXSTAT_UFLOW) {
			printf("%s: transmit underflow\n", sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
		}

		if (status & TXSTAT_16COLL) {
			printf("%s: max collisions reached\n", sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
			ifp->if_collisions += 16;
		}
	}

	i = sc->sc_prevtx;
	while (sc->sc_nfreetx < SQ_NTXDESC) {
		/*
		 * Check status first so we don't end up with a case of
		 * the buffer not being finished while the DMA channel
		 * has gone idle.
		 */
		status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
							HPC_ENETX_CTL);

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
				BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* If not yet transmitted, try and start DMA engine again */
		if ((sc->sc_txdesc[i].hdd_ctl & HDD_CTL_XMITDONE) == 0) {
			if ((status & ENETX_CTL_ACTIVE) == 0) {
				SQ_TRACE(SQ_RESTART_DMA, i, status,
				    sc->sc_nfreetx);

				bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
				    HPC_ENETX_NDBP, SQ_CDTXADDR(sc, i));

				/* Kick DMA channel into life */
				bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
				    HPC_ENETX_CTL, ENETX_CTL_ACTIVE);

				/*
				 * Set a watchdog timer in case the chip
				 * flakes out.
				 */
				ifp->if_timer = 5;
			} else {
				SQ_TRACE(SQ_TXINTR_BUSY, i, status,
				    sc->sc_nfreetx);
			}
			break;
		}

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
				sc->sc_txmap[i]->dm_mapsize,
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, i, status, sc->sc_nfreetx);
		i = SQ_NEXTTX(i);
	}

	/* prevtx now points to next xmit packet not yet finished */
	sc->sc_prevtx = i;

	/* If we have buffers free, let upper layers know */
	if (sc->sc_nfreetx > 0)
		ifp->if_flags &= ~IFF_OACTIVE;

	/* If all packets have left the coop, cancel watchdog */
	if (sc->sc_nfreetx == SQ_NTXDESC)
		ifp->if_timer = 0;

	SQ_TRACE(SQ_TXINTR_EXIT, sc->sc_prevtx, status, sc->sc_nfreetx);
	/* Try to queue more packets now that descriptors are free. */
	sq_start(ifp);

	return 1;
}


/*
 * Reset the HPC ethernet channels and the Seeq chip.
 * NOTE(review): the value 3 written to HPC_ENETR_RESET presumably
 * asserts the reset bits; 0 deasserts -- confirm against the HPC spec.
 */
void
sq_reset(struct sq_softc *sc)
{
	/* Stop HPC dma channels */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL, 0);
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETX_CTL, 0);

	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_RESET, 3);
	delay(20);
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_RESET, 0);
}

/* sq_add_rxbuf: Add a receive buffer to the indicated descriptor. */
int
sq_add_rxbuf(struct sq_softc *sc, int idx)
{
	int err;
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/* Replacing an existing buffer: drop its old DMA mapping first. */
	if (sc->sc_rxmbuf[idx] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[idx]);

	sc->sc_rxmbuf[idx] = m;

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap[idx],
				   m->m_ext.ext_buf, m->m_ext.ext_size,
				   NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, err);
		panic("sq_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[idx], 0,
			sc->sc_rxmap[idx]->dm_mapsize, BUS_DMASYNC_PREREAD);

	SQ_INIT_RXDESC(sc, idx);

	return 0;
}

/*
 * Debug helper: hex-dump `len' bytes at physical address `addr' via the
 * KSEG1 (uncached) window, 16 bytes per line.
 */
void
sq_dump_buffer(u_int32_t addr, u_int32_t len)
{
	int i;
	u_char* physaddr = (char*) MIPS_PHYS_TO_KSEG1((caddr_t)addr);

	if (len == 0)
		return;

	printf("%p: ", physaddr);

	for(i = 0; i < len; i++) {
		printf("%02x ", *(physaddr + i) & 0xff);
		if ((i % 16) ==  15 && i != len - 1)
			printf("\n%p: ", physaddr + i);
	}

	printf("\n");
}


/*
 * Parse an ethernet address string of the form "aa:bb:cc:dd:ee:ff"
 * (colons optional) into a 6-byte binary address.
 * NOTE(review): input is assumed well-formed -- a non-hex character
 * leaves that nibble of eaddr[] unmodified rather than failing.
 */
void
enaddr_aton(const char* str, u_int8_t* eaddr)
{
	int i;
	char c;

	for(i = 0; i < ETHER_ADDR_LEN; i++) {
		if (*str == ':')
			str++;

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (toupper(c) + 10 - 'A');
		}

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (toupper(c) + 10 - 'A');
		}
	}
}