if_sq.c revision 1.18
1/* $NetBSD: if_sq.c,v 1.18 2003/10/04 09:19:23 tsutsui Exp $ */ 2 3/* 4 * Copyright (c) 2001 Rafal K. Boni 5 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc. 6 * All rights reserved. 7 * 8 * Portions of this code are derived from software contributed to The 9 * NetBSD Foundation by Jason R. Thorpe of the Numerical Aerospace 10 * Simulation Facility, NASA Ames Research Center. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. The name of the author may not be used to endorse or promote products 21 * derived from this software without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_sq.c,v 1.18 2003/10/04 09:19:23 tsutsui Exp $");

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#include <machine/endian.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/ic/seeq8003reg.h>

#include <sgimips/hpc/sqvar.h>
#include <sgimips/hpc/hpcvar.h>
#include <sgimips/hpc/hpcreg.h>

#include <dev/arcbios/arcbios.h>
#include <dev/arcbios/arcbiosvar.h>

/*
 * NOTE(review): this macro erases the `static' keyword from every
 * declaration below, giving all of the driver's internal functions
 * external linkage -- presumably a debugging leftover so the symbols
 * are visible to ddb/backtraces.  Confirm intent before removing.
 */
#define static

/*
 * Short TODO list:
 *	(1) Do counters for bad-RX packets.
 *	(2) Allow multi-segment transmits, instead of copying to a single,
 *	    contiguous mbuf.
 *	(3) Verify sq_stop() turns off enough stuff; I was still getting
 *	    seeq interrupts after sq_stop().
 *	(4) Fix up printfs in driver (most should only fire ifdef SQ_DEBUG
 *	    or something similar.
 *	(5) Implement EDLC modes: especially packet auto-pad and simplex
 *	    mode.
 *	(6) Should the driver filter out its own transmissions in non-EDLC
 *	    mode?
 *	(7) Multicast support -- multicast filter, address management, ...
 *	(8) Deal with RB0 (recv buffer overflow) on reception.  Will need
 *	    to figure out if RB0 is read-only as stated in one spot in the
 *	    HPC spec or read-write (ie, is the 'write a one to clear it')
 *	    the correct thing?
 */

/* Autoconfiguration glue and ifnet entry points. */
static int	sq_match(struct device *, struct cfdata *, void *);
static void	sq_attach(struct device *, struct device *, void *);
static int	sq_init(struct ifnet *);
static void	sq_start(struct ifnet *);
static void	sq_stop(struct ifnet *, int);
static void	sq_watchdog(struct ifnet *);
static int	sq_ioctl(struct ifnet *, u_long, caddr_t);

/* Internal helpers (receive filter, interrupt paths, buffer management). */
static void	sq_set_filter(struct sq_softc *);
static int	sq_intr(void *);
static int	sq_rxintr(struct sq_softc *);
static int	sq_txintr(struct sq_softc *);
static void	sq_reset(struct sq_softc *);
static int	sq_add_rxbuf(struct sq_softc *, int);
static void	sq_dump_buffer(u_int32_t addr, u_int32_t len);

/* Parse an ARCBIOS "xx:xx:xx:xx:xx:xx" string into a binary MAC address. */
static void	enaddr_aton(const char*, u_int8_t*);

/* Actions recorded in the in-memory debug trace ring (see SQ_TRACE). */
#define SQ_RESET		1
#define SQ_ADD_TO_DMA		2
#define SQ_START_DMA		3
#define SQ_DONE_DMA		4
#define SQ_RESTART_DMA		5
#define SQ_TXINTR_ENTER		6
#define SQ_TXINTR_EXIT		7
#define SQ_TXINTR_BUSY		8

/* One entry of the debug trace ring: what happened, on which tx buffer. */
struct sq_action_trace {
	int action;	/* one of the SQ_* action codes above */
	int bufno;	/* descriptor/buffer index involved */
	int status;	/* HPC channel status at the time */
	int freebuf;	/* sc_nfreetx at the time */
};

#define SQ_TRACEBUF_SIZE	100
int sq_trace_idx = 0;
struct sq_action_trace sq_trace[SQ_TRACEBUF_SIZE];

void sq_trace_dump(struct sq_softc* sc);

/*
 * Append an entry to the global trace ring; wraps (and clears) when
 * SQ_TRACEBUF_SIZE entries have been recorded.  Debug aid only.
 */
#define SQ_TRACE(act, buf, stat, free) do {			\
	sq_trace[sq_trace_idx].action = (act);			\
	sq_trace[sq_trace_idx].bufno = (buf);			\
	sq_trace[sq_trace_idx].status = (stat);			\
	sq_trace[sq_trace_idx].freebuf = (free);		\
	if (++sq_trace_idx == SQ_TRACEBUF_SIZE) {		\
		memset(&sq_trace, 0, sizeof(sq_trace));		\
		sq_trace_idx = 0;				\
	}							\
} while (0)

CFATTACH_DECL(sq, sizeof(struct sq_softc),
    sq_match, sq_attach, NULL, NULL);

/* Minimum on-wire frame length excluding FCS; short tx frames are
 * zero-padded up to this before being handed to the chip. */
#define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)

/*
 * sq_match: autoconf match.  Accept the device when the HPC attach
 * args name matches our cfdata name.
 */
static int
sq_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct hpc_attach_args *ha = aux;

	if (strcmp(ha->ha_name, cf->cf_name) == 0)
		return (1);

	return (0);
}

/*
 * sq_attach: map the HPC DMA and Seeq chip registers, allocate and map
 * the DMA control structures plus tx/rx DMA maps, pre-load the receive
 * ring, hook up the interrupt, probe the chip type (8003 vs 80c03),
 * fetch the MAC address from ARCBIOS and attach the network interface.
 * On any failure, unwinds via the fail_* labels in reverse order.
 */
static void
sq_attach(struct device *parent, struct device *self, void *aux)
{
	int i, err;
	char* macaddr;
	struct sq_softc *sc = (void *)self;
	struct hpc_attach_args *haa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* Map the HPC ethernet DMA engine registers. */
	sc->sc_hpct = haa->ha_st;
	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
				       haa->ha_dmaoff,
				       HPC_ENET_REGS_SIZE,
				       &sc->sc_hpch)) != 0) {
		printf(": unable to map HPC DMA registers, error = %d\n", err);
		goto fail_0;
	}

	/* Map the Seeq EDLC chip registers. */
	sc->sc_regt = haa->ha_st;
	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
				       haa->ha_devoff,
				       HPC_ENET_DEVREGS_SIZE,
				       &sc->sc_regh)) != 0) {
		printf(": unable to map Seeq registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_dmat = haa->ha_dmat;

	/* Allocate, map and load the shared descriptor/control area. */
	if ((err = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sq_control),
				    PAGE_SIZE, PAGE_SIZE, &sc->sc_cdseg,
				    1, &sc->sc_ncdseg, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n", err);
		goto fail_0;
	}

	if ((err = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg,
				  sizeof(struct sq_control),
				  (caddr_t *)&sc->sc_control,
				  BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", err);
		goto fail_1;
	}

	if ((err = bus_dmamap_create(sc->sc_dmat, sizeof(struct sq_control),
				     1, sizeof(struct sq_control), PAGE_SIZE,
				     BUS_DMA_NOWAIT, &sc->sc_cdmap)) != 0) {
		printf(": unable to create DMA map for control data, error "
		       "= %d\n", err);
		goto fail_2;
	}

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap, sc->sc_control,
				   sizeof(struct sq_control),
				   NULL, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load DMA map for control data, error "
		       "= %d\n", err);
		goto fail_3;
	}

	memset(sc->sc_control, 0, sizeof(struct sq_control));

	/* Create transmit buffer DMA maps */
	for (i = 0; i < SQ_NTXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
					     0, BUS_DMA_NOWAIT,
					     &sc->sc_txmap[i])) != 0) {
			printf(": unable to create tx DMA map %d, error = %d\n",
			       i, err);
			goto fail_4;
		}
	}

	/* Create receive buffer DMA maps */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
					     0, BUS_DMA_NOWAIT,
					     &sc->sc_rxmap[i])) != 0) {
			printf(": unable to create rx DMA map %d, error = %d\n",
			       i, err);
			goto fail_5;
		}
	}

	/* Pre-allocate the receive buffers. */
	/* NOTE(review): the first string below ends "%d\n," -- the comma
	 * looks like a typo for a trailing space, but it is runtime output
	 * and is left untouched here. */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = sq_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d\n,"
			       " error = %d\n", i, err);
			goto fail_6;
		}
	}

	/* The PROM environment carries the MAC address as "eaddr". */
	if ((macaddr = ARCBIOS->GetEnvironmentVariable("eaddr")) == NULL) {
		printf(": unable to get MAC address!\n");
		goto fail_6;
	}

	evcnt_attach_dynamic(&sc->sq_intrcnt, EVCNT_TYPE_INTR, NULL,
					      self->dv_xname, "intr");

	if ((cpu_intr_establish(haa->ha_irq, IPL_NET, sq_intr, sc)) == NULL) {
		printf(": unable to establish interrupt!\n");
		goto fail_6;
	}

	/* Reset the chip to a known state. */
	sq_reset(sc);

	/*
	 * Determine if we're an 8003 or 80c03 by setting the first
	 * MAC address register to non-zero, and then reading it back.
	 * If it's zero, we have an 80c03, because we will have read
	 * the TxCollLSB register.
	 */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0, 0xa5);
	if (bus_space_read_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0) == 0)
		sc->sc_type = SQ_TYPE_80C03;
	else
		sc->sc_type = SQ_TYPE_8003;
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0, 0x00);

	printf(": SGI Seeq %s\n",
	    sc->sc_type == SQ_TYPE_80C03 ? "80c03" : "8003");

	enaddr_aton(macaddr, sc->sc_enaddr);

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
					   ether_sprintf(sc->sc_enaddr));

	/* Fill in the ifnet and attach the interface. */
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = sq_init;
	ifp->if_stop = sq_stop;
	ifp->if_start = sq_start;
	ifp->if_ioctl = sq_ioctl;
	ifp->if_watchdog = sq_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_NOTRAILERS | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	memset(&sq_trace, 0, sizeof(sq_trace));
	/* Done! */
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_6:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
fail_5:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]);
	}
fail_4:
	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap);
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t) sc->sc_control,
	    sizeof(struct sq_control));
fail_1:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg);
fail_0:
	return;
}

/* Set up data to get the interface up and running.
 */
int
sq_init(struct ifnet *ifp)
{
	int i;
	u_int32_t reg;
	struct sq_softc *sc = ifp->if_softc;

	/* Cancel any in-progress I/O */
	sq_stop(ifp, 0);

	/* Reset the software ring state: rx starts at slot 0, all tx free. */
	sc->sc_nextrx = 0;

	sc->sc_nfreetx = SQ_NTXDESC;
	sc->sc_nexttx = sc->sc_prevtx = 0;

	SQ_TRACE(SQ_RESET, 0, 0, sc->sc_nfreetx);

	/* Set into 8003 mode, bank 0 to program ethernet address */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD, TXCMD_BANK0);

	/* Now write the address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		bus_space_write_1(sc->sc_regt, sc->sc_regh, i,
		    sc->sc_enaddr[i]);

	/* Prototype receive command: interrupt on every rx condition. */
	sc->sc_rxcmd = RXCMD_IE_CRC |
		       RXCMD_IE_DRIB |
		       RXCMD_IE_SHORT |
		       RXCMD_IE_END |
		       RXCMD_IE_GOOD;

	/*
	 * Set the receive filter -- this will add some bits to the
	 * prototype RXCMD register.  Do this before setting the
	 * transmit config register, since we might need to switch
	 * banks.
	 */
	sq_set_filter(sc);

	/* Set up Seeq transmit command register */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD,
						    TXCMD_IE_UFLOW |
						    TXCMD_IE_COLL |
						    TXCMD_IE_16COLL |
						    TXCMD_IE_GOOD);

	/* Now write the receive command register. */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_RXCMD, sc->sc_rxcmd);

	/* Set up HPC ethernet DMA config */
	reg = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_DMACFG);
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_DMACFG,
			    	reg | ENETR_DMACFG_FIX_RXDC |
				ENETR_DMACFG_FIX_INTR |
				ENETR_DMACFG_FIX_EOP);

	/* Pass the start of the receive ring to the HPC */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_NDBP,
						    SQ_CDRXADDR(sc, 0));

	/* And turn on the HPC ethernet receive channel */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL,
						    ENETR_CTL_ACTIVE);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

/*
 * sq_set_filter: fold the receive-filter mode into the prototype RXCMD
 * value (sc_rxcmd).  Promiscuous implies all-multi; otherwise, since the
 * 8003 has no multicast hash, any multicast membership at all enables
 * reception of all multicast frames.
 */
static void
sq_set_filter(struct sq_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;

	/*
	 * Check for promiscuous mode.  Also implies
	 * all-multicast.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rxcmd |= RXCMD_REC_ALL;
		ifp->if_flags |= IFF_ALLMULTI;
		return;
	}

	/*
	 * The 8003 has no hash table.  If we have any multicast
	 * addresses on the list, enable reception of all multicast
	 * frames.
	 *
	 * XXX The 80c03 has a hash table.  We should use it.
	 */

	ETHER_FIRST_MULTI(step, ec, enm);

	if (enm == NULL) {
		/* No multicast memberships: broadcast + unicast only. */
		sc->sc_rxcmd &= ~RXCMD_REC_MASK;
		sc->sc_rxcmd |= RXCMD_REC_BROAD;

		ifp->if_flags &= ~IFF_ALLMULTI;
		return;
	}

	sc->sc_rxcmd |= RXCMD_REC_MULTI;
	ifp->if_flags |= IFF_ALLMULTI;
}

/*
 * sq_ioctl: interface ioctl handler.  Defers to ether_ioctl() and
 * reinitializes the interface when the multicast list changed.
 */
int
sq_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int s, error = 0;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		error = sq_init(ifp);
	}

	splx(s);
	return (error);
}

/*
 * sq_start: ifnet transmit kickoff.  Dequeues packets, copies each into
 * a single contiguous mbuf when it needs padding or cannot be DMA-loaded
 * directly, fills transmit descriptors, and starts (or relies on the tx
 * interrupt to restart) the HPC transmit DMA channel.
 */
void
sq_start(struct ifnet *ifp)
{
	struct sq_softc *sc = ifp->if_softc;
	u_int32_t status;
	struct mbuf *m0, *m;
	bus_dmamap_t dmamap;
	int err, totlen, nexttx, firsttx, lasttx, ofree, seg;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_nfreetx;
	firsttx = sc->sc_nexttx;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_nfreetx != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;	/* non-NULL later iff we copied the packet */

		dmamap = sc->sc_txmap[sc->sc_nexttx];

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the alloted number of segments, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 * Also copy it if we need to pad, so that we are sure there
		 * is room for the pad buffer.
		 * XXX the right way of doing this is to use a static buffer
		 * for padding and adding it to the transmit descriptor (see
		 * sys/dev/pci/if_tl.c for example).  We can't do this here yet
		 * because we can't send packets with more than one fragment.
		 */
		if (m0->m_pkthdr.len < ETHER_PAD_LEN ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
						      BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}

			/* Flatten the chain; zero-pad short frames. */
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			if (m0->m_pkthdr.len < ETHER_PAD_LEN) {
				memset(mtod(m, char *) + m0->m_pkthdr.len, 0,
				    ETHER_PAD_LEN - m0->m_pkthdr.len);
				m->m_pkthdr.len = m->m_len = ETHER_PAD_LEN;
			} else
				m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;

			if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
						        m, BUS_DMA_NOWAIT)) != 0) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, err);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_nfreetx) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX it is worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
		if (m != NULL) {
			/* We copied; free the original and send the copy. */
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_nexttx, seg = 0, totlen = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = SQ_NEXTTX(nexttx)) {
			sc->sc_txdesc[nexttx].hdd_bufptr =
					    dmamap->dm_segs[seg].ds_addr;
			sc->sc_txdesc[nexttx].hdd_ctl =
					    dmamap->dm_segs[seg].ds_len;
			sc->sc_txdesc[nexttx].hdd_descptr=
					    SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx));
			lasttx = nexttx;
			totlen += dmamap->dm_segs[seg].ds_len;
		}

		/* Last descriptor gets end-of-packet */
		sc->sc_txdesc[lasttx].hdd_ctl |= HDD_CTL_EOPACKET;

#if 0
		printf("%s: transmit %d-%d, len %d\n", sc->sc_dev.dv_xname,
						       sc->sc_nexttx, lasttx,
						       totlen);
#endif

		if (ifp->if_flags & IFF_DEBUG) {
			printf(" transmit chain:\n");
			for (seg = sc->sc_nexttx;; seg = SQ_NEXTTX(seg)) {
				printf(" descriptor %d:\n", seg);
				printf(" hdd_bufptr: 0x%08x\n",
					sc->sc_txdesc[seg].hdd_bufptr);
				printf(" hdd_ctl: 0x%08x\n",
					sc->sc_txdesc[seg].hdd_ctl);
				printf(" hdd_descptr: 0x%08x\n",
					sc->sc_txdesc[seg].hdd_descptr);

				if (seg == lasttx)
					break;
			}
		}

		/* Sync the descriptors we're using. */
		SQ_CDTXSYNC(sc, sc->sc_nexttx, dmamap->dm_nsegs,
				BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Store a pointer to the packet so we can free it later */
		sc->sc_txmbuf[sc->sc_nexttx] = m0;

		/* Advance the tx pointer. */
		sc->sc_nfreetx -= dmamap->dm_nsegs;
		sc->sc_nexttx = nexttx;

	}

	/* All transmit descriptors used up, let upper layers know */
	if (sc->sc_nfreetx == 0)
		ifp->if_flags |= IFF_OACTIVE;

	if (sc->sc_nfreetx != ofree) {
#if 0
		printf("%s: %d packets enqueued, first %d, INTR on %d\n",
			    sc->sc_dev.dv_xname, lasttx - firsttx + 1,
			    firsttx, lasttx);
#endif

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued, mark it as the last
		 * descriptor.
		 */
		sc->sc_txdesc[lasttx].hdd_ctl |= (HDD_CTL_INTR |
						  HDD_CTL_EOCHAIN);
		SQ_CDTXSYNC(sc, lasttx, 1,
				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * There is a potential race condition here if the HPC
		 * DMA channel is active and we try and either update
		 * the 'next descriptor' pointer in the HPC PIO space
		 * or the 'next descriptor' pointer in a previous desc-
		 * riptor.
		 *
		 * To avoid this, if the channel is active, we rely on
		 * the transmit interrupt routine noticing that there
		 * are more packets to send and restarting the HPC DMA
		 * engine, rather than mucking with the DMA state here.
		 */
		status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
						       HPC_ENETX_CTL);

		if ((status & ENETX_CTL_ACTIVE) != 0) {
			/* Channel busy: just splice our chain onto the end. */
			SQ_TRACE(SQ_ADD_TO_DMA, firsttx, status,
			    sc->sc_nfreetx);
			sc->sc_txdesc[SQ_PREVTX(firsttx)].hdd_ctl &=
			    ~HDD_CTL_EOCHAIN;
			SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx), 1,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else {
			SQ_TRACE(SQ_START_DMA, firsttx, status, sc->sc_nfreetx);

			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
			    HPC_ENETX_NDBP, SQ_CDTXADDR(sc, firsttx));

			/* Kick DMA channel into life */
			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
			    HPC_ENETX_CTL, ENETX_CTL_ACTIVE);
		}

		/* Set a watchdog timer in case the chip flakes out.
		 */
		ifp->if_timer = 5;
	}
}

/*
 * sq_stop: ifnet stop routine.  Frees any pending transmit mbufs,
 * quiesces the Seeq chip and resets the HPC channels.
 */
void
sq_stop(struct ifnet *ifp, int disable)
{
	int i;
	struct sq_softc *sc = ifp->if_softc;

	for (i =0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = NULL;
		}
	}

	/* Clear Seeq transmit/receive command registers */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD, 0);
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_RXCMD, 0);

	sq_reset(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/* Device timeout/watchdog routine. */
void
sq_watchdog(struct ifnet *ifp)
{
	u_int32_t status;
	struct sq_softc *sc = ifp->if_softc;

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETX_CTL);
	log(LOG_ERR, "%s: device timeout (prev %d, next %d, free %d, "
		     "status %08x)\n", sc->sc_dev.dv_xname, sc->sc_prevtx,
		     sc->sc_nexttx, sc->sc_nfreetx, status);

	/* Dump and reset the debug trace ring, then reinitialize. */
	sq_trace_dump(sc);

	memset(&sq_trace, 0, sizeof(sq_trace));
	sq_trace_idx = 0;

	++ifp->if_oerrors;

	sq_init(ifp);
}

/* Print the accumulated SQ_TRACE entries (debug aid). */
void sq_trace_dump(struct sq_softc* sc)
{
	int i;

	for(i = 0; i < sq_trace_idx; i++) {
		printf("%s: [%d] action %d, buf %d, free %d, status %08x\n",
			sc->sc_dev.dv_xname, i, sq_trace[i].action,
			sq_trace[i].bufno, sq_trace[i].freebuf,
			sq_trace[i].status);
	}
}

/*
 * sq_intr: interrupt handler.  Acknowledges the HPC interrupt and
 * dispatches to the rx/tx service routines.
 */
static int
sq_intr(void * arg)
{
	struct sq_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int handled = 0;
	u_int32_t stat;

	/*
	 * NOTE(review): bit 1 of the reset register appears to be the
	 * interrupt-pending flag (tested below, written back to ack) --
	 * confirm against the HPC documentation.
	 */
	stat = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_RESET);

	if ((stat & 2) == 0) {
		printf("%s: Unexpected interrupt!\n", sc->sc_dev.dv_xname);
		return 0;
	}

	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_RESET, 2);

	/*
	 * If the interface isn't running, the interrupt couldn't
	 * possibly have come from us.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	sc->sq_intrcnt.ev_count++;

	/* Always check for received packets */
	if (sq_rxintr(sc) != 0)
		handled++;

	/* Only handle transmit interrupts if we actually sent something */
	if (sc->sc_nfreetx < SQ_NTXDESC) {
		sq_txintr(sc);
		handled++;
	}

#if NRND > 0
	if (handled)
		rnd_add_uint32(&sc->rnd_source, stat);
#endif
	return (handled);
}

/*
 * sq_rxintr: drain completed receive descriptors, hand good frames to
 * the stack, recycle buffers for bad ones, then fix up the ring's
 * end-of-chain marker and restart the receive channel if it stopped.
 */
static int
sq_rxintr(struct sq_softc *sc)
{
	int count = 0;
	struct mbuf* m;
	int i, framelen;
	u_int8_t pktstat;
	u_int32_t status;
	int new_end, orig_end;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	for(i = sc->sc_nextrx;; i = SQ_NEXTRX(i)) {
		SQ_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* If this is a CPU-owned buffer, we're at the end of the list */
		if (sc->sc_rxdesc[i].hdd_ctl & HDD_CTL_OWN) {
#if 0
			u_int32_t reg;

			reg = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
			    HPC_ENETR_CTL);
			printf("%s: rxintr: done at %d (ctl %08x)\n",
			    sc->sc_dev.dv_xname, i, reg);
#endif
			break;
		}

		count++;

		m = sc->sc_rxmbuf[i];
		/*
		 * NOTE(review): HDD_CTL_BYTECNT seems to report bytes
		 * remaining in the buffer, so the received length is
		 * ext_size minus that; the additional "- 3" presumably
		 * strips trailing alignment/status bytes (the status byte
		 * itself is read at framelen + 2 below) -- verify against
		 * the HPC spec.
		 */
		framelen = m->m_ext.ext_size -
		    HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hdd_ctl) - 3;

		/* Now sync the actual packet data */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
		    sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);

		pktstat = *((u_int8_t*)m->m_data + framelen + 2);

		if ((pktstat & RXSTAT_GOOD) == 0) {
			/* Bad frame: count it and recycle the descriptor. */
			ifp->if_ierrors++;

			if (pktstat & RXSTAT_OFLOW)
				printf("%s: receive FIFO overflow\n",
				    sc->sc_dev.dv_xname);

			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			continue;
		}

		if (sq_add_rxbuf(sc, i) != 0) {
			/* No replacement buffer: drop and reuse this one. */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			continue;
		}


		m->m_data += 2;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = framelen;

		ifp->if_ipackets++;

#if 0
		printf("%s: sq_rxintr: buf %d len %d\n", sc->sc_dev.dv_xname,
		    i, framelen);
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		(*ifp->if_input)(ifp, m);
	}


	/* If anything happened, move ring start/end pointers to new spot */
	if (i != sc->sc_nextrx) {
		new_end = SQ_PREVRX(i);
		sc->sc_rxdesc[new_end].hdd_ctl |= HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, new_end, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);

		orig_end = SQ_PREVRX(sc->sc_nextrx);
		sc->sc_rxdesc[orig_end].hdd_ctl &= ~HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, orig_end, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);

		sc->sc_nextrx = i;
	}

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL);

	/* If receive channel is stopped, restart it...
	 */
	if ((status & ENETR_CTL_ACTIVE) == 0) {
		/* Pass the start of the receive ring to the HPC */
		bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
		    HPC_ENETR_NDBP, SQ_CDRXADDR(sc, sc->sc_nextrx));

		/* And turn on the HPC ethernet receive channel */
		bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL,
		    ENETR_CTL_ACTIVE);
	}

	return count;
}

/*
 * sq_txintr: reclaim completed transmit descriptors, restart the DMA
 * channel if it idled with work pending, update error/collision
 * counters, and kick sq_start() to queue any backlog.
 */
static int
sq_txintr(struct sq_softc *sc)
{
	int i;
	u_int32_t status;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETX_CTL);

	SQ_TRACE(SQ_TXINTR_ENTER, sc->sc_prevtx, status, sc->sc_nfreetx);

	if ((status & (ENETX_CTL_ACTIVE | TXSTAT_GOOD)) == 0) {
		if (status & TXSTAT_COLL)
			ifp->if_collisions++;

		if (status & TXSTAT_UFLOW) {
			printf("%s: transmit underflow\n", sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
		}

		if (status & TXSTAT_16COLL) {
			printf("%s: max collisions reached\n", sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
			ifp->if_collisions += 16;
		}
	}

	i = sc->sc_prevtx;
	while (sc->sc_nfreetx < SQ_NTXDESC) {
		/*
		 * Check status first so we don't end up with a case of
		 * the buffer not being finished while the DMA channel
		 * has gone idle.
		 */
		status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
						       HPC_ENETX_CTL);

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
				BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* If not yet transmitted, try and start DMA engine again */
		if ((sc->sc_txdesc[i].hdd_ctl & HDD_CTL_XMITDONE) == 0) {
			if ((status & ENETX_CTL_ACTIVE) == 0) {
				SQ_TRACE(SQ_RESTART_DMA, i, status,
				    sc->sc_nfreetx);

				bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
				    HPC_ENETX_NDBP, SQ_CDTXADDR(sc, i));

				/* Kick DMA channel into life */
				bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
				    HPC_ENETX_CTL, ENETX_CTL_ACTIVE);

				/*
				 * Set a watchdog timer in case the chip
				 * flakes out.
				 */
				ifp->if_timer = 5;
			} else {
				SQ_TRACE(SQ_TXINTR_BUSY, i, status,
				    sc->sc_nfreetx);
			}
			break;
		}

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
				sc->sc_txmap[i]->dm_mapsize,
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, i, status, sc->sc_nfreetx);
		i = SQ_NEXTTX(i);
	}

	/* prevtx now points to next xmit packet not yet finished */
	sc->sc_prevtx = i;

	/* If we have buffers free, let upper layers know */
	if (sc->sc_nfreetx > 0)
		ifp->if_flags &= ~IFF_OACTIVE;

	/* If all packets have left the coop, cancel watchdog */
	if (sc->sc_nfreetx == SQ_NTXDESC)
		ifp->if_timer = 0;

	SQ_TRACE(SQ_TXINTR_EXIT, sc->sc_prevtx, status, sc->sc_nfreetx);
	sq_start(ifp);

	return 1;
}


/*
 * sq_reset: stop both HPC DMA channels, then pulse the HPC ethernet
 * reset register to put the chip into a known state.
 */
void
sq_reset(struct sq_softc *sc)
{
	/* Stop HPC dma channels */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL, 0);
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETX_CTL, 0);

	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_RESET, 3);
	delay(20);
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_RESET, 0);
}

/* sq_add_rxbuf: Add a receive buffer to the indicated descriptor. */
int
sq_add_rxbuf(struct sq_softc *sc, int idx)
{
	int err;
	struct mbuf *m;

	/* Get a header mbuf with a cluster attached. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/* Release any buffer previously mapped at this slot. */
	if (sc->sc_rxmbuf[idx] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[idx]);

	sc->sc_rxmbuf[idx] = m;

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap[idx],
				   m->m_ext.ext_buf, m->m_ext.ext_size,
				   NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, err);
		panic("sq_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[idx], 0,
	    sc->sc_rxmap[idx]->dm_mapsize, BUS_DMASYNC_PREREAD);

	SQ_INIT_RXDESC(sc, idx);

	return 0;
}

/*
 * sq_dump_buffer: hex-dump `len' bytes at physical address `addr'
 * (accessed through KSEG1, i.e. uncached).  Debug aid only.
 */
void
sq_dump_buffer(u_int32_t addr, u_int32_t len)
{
	u_int i;
	u_char* physaddr = (char*) MIPS_PHYS_TO_KSEG1((caddr_t)addr);

	if (len == 0)
		return;

	printf("%p: ", physaddr);

	for(i = 0; i < len; i++) {
		printf("%02x ", *(physaddr + i) & 0xff);
		if ((i % 16) ==  15 && i != len - 1)
			printf("\n%p: ", physaddr + i);
	}

	printf("\n");
}


/*
 * enaddr_aton: parse a "xx:xx:xx:xx:xx:xx" hex string (as returned by
 * ARCBIOS) into a 6-byte binary ethernet address.
 *
 * NOTE(review): isdigit()/isxdigit()/toupper() are not declared by any
 * header explicitly included above -- presumably pulled in indirectly
 * by the kernel headers; verify.  Also, a malformed input character
 * silently leaves the corresponding nibble unset.
 */
void
enaddr_aton(const char* str, u_int8_t* eaddr)
{
	int i;
	char c;

	for(i = 0; i < ETHER_ADDR_LEN; i++) {
		if (*str == ':')
			str++;

		/* High nibble. */
		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (toupper(c) + 10 - 'A');
		}

		/* Low nibble. */
		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (toupper(c) + 10 - 'A');
		}
	}
}