if_sq.c revision 1.39
1/* $NetBSD: if_sq.c,v 1.39 2011/01/25 12:43:30 tsutsui Exp $ */ 2 3/* 4 * Copyright (c) 2001 Rafal K. Boni 5 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc. 6 * All rights reserved. 7 * 8 * Portions of this code are derived from software contributed to The 9 * NetBSD Foundation by Jason R. Thorpe of the Numerical Aerospace 10 * Simulation Facility, NASA Ames Research Center. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. The name of the author may not be used to endorse or promote products 21 * derived from this software without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
33 */ 34 35#include <sys/cdefs.h> 36__KERNEL_RCSID(0, "$NetBSD: if_sq.c,v 1.39 2011/01/25 12:43:30 tsutsui Exp $"); 37 38 39#include <sys/param.h> 40#include <sys/systm.h> 41#include <sys/device.h> 42#include <sys/callout.h> 43#include <sys/mbuf.h> 44#include <sys/malloc.h> 45#include <sys/kernel.h> 46#include <sys/socket.h> 47#include <sys/ioctl.h> 48#include <sys/errno.h> 49#include <sys/syslog.h> 50 51#include <uvm/uvm_extern.h> 52 53#include <machine/endian.h> 54 55#include <net/if.h> 56#include <net/if_dl.h> 57#include <net/if_media.h> 58#include <net/if_ether.h> 59 60#include <net/bpf.h> 61 62#include <machine/bus.h> 63#include <machine/intr.h> 64#include <machine/sysconf.h> 65 66#include <dev/ic/seeq8003reg.h> 67 68#include <sgimips/hpc/sqvar.h> 69#include <sgimips/hpc/hpcvar.h> 70#include <sgimips/hpc/hpcreg.h> 71 72#include <dev/arcbios/arcbios.h> 73#include <dev/arcbios/arcbiosvar.h> 74 75#define static 76 77/* 78 * Short TODO list: 79 * (1) Do counters for bad-RX packets. 80 * (2) Allow multi-segment transmits, instead of copying to a single, 81 * contiguous mbuf. 82 * (3) Verify sq_stop() turns off enough stuff; I was still getting 83 * seeq interrupts after sq_stop(). 84 * (4) Implement EDLC modes: especially packet auto-pad and simplex 85 * mode. 86 * (5) Should the driver filter out its own transmissions in non-EDLC 87 * mode? 88 * (6) Multicast support -- multicast filter, address management, ... 89 * (7) Deal with RB0 (recv buffer overflow) on reception. Will need 90 * to figure out if RB0 is read-only as stated in one spot in the 91 * HPC spec or read-write (ie, is the 'write a one to clear it') 92 * the correct thing? 
93 */ 94 95#if defined(SQ_DEBUG) 96 int sq_debug = 0; 97 #define SQ_DPRINTF(x) if (sq_debug) printf x 98#else 99 #define SQ_DPRINTF(x) 100#endif 101 102static int sq_match(device_t, cfdata_t, void *); 103static void sq_attach(device_t, device_t, void *); 104static int sq_init(struct ifnet *); 105static void sq_start(struct ifnet *); 106static void sq_stop(struct ifnet *, int); 107static void sq_watchdog(struct ifnet *); 108static int sq_ioctl(struct ifnet *, u_long, void *); 109 110static void sq_set_filter(struct sq_softc *); 111static int sq_intr(void *); 112static int sq_rxintr(struct sq_softc *); 113static int sq_txintr(struct sq_softc *); 114static void sq_txring_hpc1(struct sq_softc *); 115static void sq_txring_hpc3(struct sq_softc *); 116static void sq_reset(struct sq_softc *); 117static int sq_add_rxbuf(struct sq_softc *, int); 118static void sq_dump_buffer(paddr_t addr, psize_t len); 119static void sq_trace_dump(struct sq_softc *); 120 121CFATTACH_DECL_NEW(sq, sizeof(struct sq_softc), 122 sq_match, sq_attach, NULL, NULL); 123 124#define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN) 125 126#define sq_seeq_read(sc, off) \ 127 bus_space_read_1(sc->sc_regt, sc->sc_regh, off) 128#define sq_seeq_write(sc, off, val) \ 129 bus_space_write_1(sc->sc_regt, sc->sc_regh, off, val) 130 131#define sq_hpc_read(sc, off) \ 132 bus_space_read_4(sc->sc_hpct, sc->sc_hpch, off) 133#define sq_hpc_write(sc, off, val) \ 134 bus_space_write_4(sc->sc_hpct, sc->sc_hpch, off, val) 135 136/* MAC address offset for non-onboard implementations */ 137#define SQ_HPC_EEPROM_ENADDR 250 138 139#define SGI_OUI_0 0x08 140#define SGI_OUI_1 0x00 141#define SGI_OUI_2 0x69 142 143static int 144sq_match(device_t parent, cfdata_t cf, void *aux) 145{ 146 struct hpc_attach_args *ha = aux; 147 148 if (strcmp(ha->ha_name, cf->cf_name) == 0) { 149 vaddr_t reset, txstat; 150 151 reset = MIPS_PHYS_TO_KSEG1(ha->ha_sh + 152 ha->ha_dmaoff + ha->hpc_regs->enetr_reset); 153 txstat = MIPS_PHYS_TO_KSEG1(ha->ha_sh 
+ 154 ha->ha_devoff + (SEEQ_TXSTAT << 2)); 155 156 if (platform.badaddr((void *)reset, sizeof(reset))) 157 return (0); 158 159 *(volatile uint32_t *)reset = 0x1; 160 delay(20); 161 *(volatile uint32_t *)reset = 0x0; 162 163 if (platform.badaddr((void *)txstat, sizeof(txstat))) 164 return (0); 165 166 if ((*(volatile uint32_t *)txstat & 0xff) == TXSTAT_OLDNEW) 167 return (1); 168 } 169 170 return (0); 171} 172 173static void 174sq_attach(device_t parent, device_t self, void *aux) 175{ 176 int i, err; 177 const char* macaddr; 178 struct sq_softc *sc = device_private(self); 179 struct hpc_attach_args *haa = aux; 180 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 181 182 sc->sc_dev = self; 183 sc->sc_hpct = haa->ha_st; 184 sc->hpc_regs = haa->hpc_regs; /* HPC register definitions */ 185 186 if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh, 187 haa->ha_dmaoff, 188 sc->hpc_regs->enet_regs_size, 189 &sc->sc_hpch)) != 0) { 190 printf(": unable to map HPC DMA registers, error = %d\n", err); 191 goto fail_0; 192 } 193 194 sc->sc_regt = haa->ha_st; 195 if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh, 196 haa->ha_devoff, 197 sc->hpc_regs->enet_devregs_size, 198 &sc->sc_regh)) != 0) { 199 printf(": unable to map Seeq registers, error = %d\n", err); 200 goto fail_0; 201 } 202 203 sc->sc_dmat = haa->ha_dmat; 204 205 if ((err = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sq_control), 206 PAGE_SIZE, PAGE_SIZE, &sc->sc_cdseg, 207 1, &sc->sc_ncdseg, BUS_DMA_NOWAIT)) != 0) { 208 printf(": unable to allocate control data, error = %d\n", err); 209 goto fail_0; 210 } 211 212 if ((err = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg, 213 sizeof(struct sq_control), 214 (void **)&sc->sc_control, 215 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) { 216 printf(": unable to map control data, error = %d\n", err); 217 goto fail_1; 218 } 219 220 if ((err = bus_dmamap_create(sc->sc_dmat, sizeof(struct sq_control), 221 1, sizeof(struct sq_control), PAGE_SIZE, 222 BUS_DMA_NOWAIT, 
&sc->sc_cdmap)) != 0) { 223 printf(": unable to create DMA map for control data, error " 224 "= %d\n", err); 225 goto fail_2; 226 } 227 228 if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap, sc->sc_control, 229 sizeof(struct sq_control), 230 NULL, BUS_DMA_NOWAIT)) != 0) { 231 printf(": unable to load DMA map for control data, error " 232 "= %d\n", err); 233 goto fail_3; 234 } 235 236 memset(sc->sc_control, 0, sizeof(struct sq_control)); 237 238 /* Create transmit buffer DMA maps */ 239 for (i = 0; i < SQ_NTXDESC; i++) { 240 if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 241 0, BUS_DMA_NOWAIT, 242 &sc->sc_txmap[i])) != 0) { 243 printf(": unable to create tx DMA map %d, error = %d\n", 244 i, err); 245 goto fail_4; 246 } 247 } 248 249 /* Create receive buffer DMA maps */ 250 for (i = 0; i < SQ_NRXDESC; i++) { 251 if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 252 0, BUS_DMA_NOWAIT, 253 &sc->sc_rxmap[i])) != 0) { 254 printf(": unable to create rx DMA map %d, error = %d\n", 255 i, err); 256 goto fail_5; 257 } 258 } 259 260 /* Pre-allocate the receive buffers. */ 261 for (i = 0; i < SQ_NRXDESC; i++) { 262 if ((err = sq_add_rxbuf(sc, i)) != 0) { 263 printf(": unable to allocate or map rx buffer %d\n," 264 " error = %d\n", i, err); 265 goto fail_6; 266 } 267 } 268 269 memcpy(sc->sc_enaddr, &haa->hpc_eeprom[SQ_HPC_EEPROM_ENADDR], 270 ETHER_ADDR_LEN); 271 272 /* 273 * If our mac address is bogus, obtain it from ARCBIOS. This will 274 * be true of the onboard HPC3 on IP22, since there is no eeprom, 275 * but rather the DS1386 RTC's battery-backed ram is used. 
276 */ 277 if (sc->sc_enaddr[0] != SGI_OUI_0 || sc->sc_enaddr[1] != SGI_OUI_1 || 278 sc->sc_enaddr[2] != SGI_OUI_2) { 279 macaddr = ARCBIOS->GetEnvironmentVariable("eaddr"); 280 if (macaddr == NULL) { 281 printf(": unable to get MAC address!\n"); 282 goto fail_6; 283 } 284 ether_aton_r(sc->sc_enaddr, sizeof(sc->sc_enaddr), macaddr); 285 } 286 287 evcnt_attach_dynamic(&sc->sq_intrcnt, EVCNT_TYPE_INTR, NULL, 288 device_xname(self), "intr"); 289 290 if ((cpu_intr_establish(haa->ha_irq, IPL_NET, sq_intr, sc)) == NULL) { 291 printf(": unable to establish interrupt!\n"); 292 goto fail_6; 293 } 294 295 /* Reset the chip to a known state. */ 296 sq_reset(sc); 297 298 /* 299 * Determine if we're an 8003 or 80c03 by setting the first 300 * MAC address register to non-zero, and then reading it back. 301 * If it's zero, we have an 80c03, because we will have read 302 * the TxCollLSB register. 303 */ 304 sq_seeq_write(sc, SEEQ_TXCOLLS0, 0xa5); 305 if (sq_seeq_read(sc, SEEQ_TXCOLLS0) == 0) 306 sc->sc_type = SQ_TYPE_80C03; 307 else 308 sc->sc_type = SQ_TYPE_8003; 309 sq_seeq_write(sc, SEEQ_TXCOLLS0, 0x00); 310 311 printf(": SGI Seeq %s\n", 312 sc->sc_type == SQ_TYPE_80C03 ? "80c03" : "8003"); 313 314 printf("%s: Ethernet address %s\n", 315 device_xname(self), ether_sprintf(sc->sc_enaddr)); 316 317 strcpy(ifp->if_xname, device_xname(self)); 318 ifp->if_softc = sc; 319 ifp->if_mtu = ETHERMTU; 320 ifp->if_init = sq_init; 321 ifp->if_stop = sq_stop; 322 ifp->if_start = sq_start; 323 ifp->if_ioctl = sq_ioctl; 324 ifp->if_watchdog = sq_watchdog; 325 ifp->if_flags = IFF_BROADCAST | IFF_NOTRAILERS | IFF_MULTICAST; 326 IFQ_SET_READY(&ifp->if_snd); 327 328 if_attach(ifp); 329 ether_ifattach(ifp, sc->sc_enaddr); 330 331 memset(&sc->sq_trace, 0, sizeof(sc->sq_trace)); 332 /* Done! */ 333 return; 334 335 /* 336 * Free any resources we've allocated during the failed attach 337 * attempt. Do this in reverse order and fall through. 
338 */ 339fail_6: 340 for (i = 0; i < SQ_NRXDESC; i++) { 341 if (sc->sc_rxmbuf[i] != NULL) { 342 bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]); 343 m_freem(sc->sc_rxmbuf[i]); 344 } 345 } 346fail_5: 347 for (i = 0; i < SQ_NRXDESC; i++) { 348 if (sc->sc_rxmap[i] != NULL) 349 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]); 350 } 351fail_4: 352 for (i = 0; i < SQ_NTXDESC; i++) { 353 if (sc->sc_txmap[i] != NULL) 354 bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]); 355 } 356 bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap); 357fail_3: 358 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap); 359fail_2: 360 bus_dmamem_unmap(sc->sc_dmat, (void *) sc->sc_control, 361 sizeof(struct sq_control)); 362fail_1: 363 bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg); 364fail_0: 365 return; 366} 367 368/* Set up data to get the interface up and running. */ 369int 370sq_init(struct ifnet *ifp) 371{ 372 int i; 373 struct sq_softc *sc = ifp->if_softc; 374 375 /* Cancel any in-progress I/O */ 376 sq_stop(ifp, 0); 377 378 sc->sc_nextrx = 0; 379 380 sc->sc_nfreetx = SQ_NTXDESC; 381 sc->sc_nexttx = sc->sc_prevtx = 0; 382 383 SQ_TRACE(SQ_RESET, sc, 0, 0); 384 385 /* Set into 8003 mode, bank 0 to program ethernet address */ 386 sq_seeq_write(sc, SEEQ_TXCMD, TXCMD_BANK0); 387 388 /* Now write the address */ 389 for (i = 0; i < ETHER_ADDR_LEN; i++) 390 sq_seeq_write(sc, i, sc->sc_enaddr[i]); 391 392 sc->sc_rxcmd = RXCMD_IE_CRC | 393 RXCMD_IE_DRIB | 394 RXCMD_IE_SHORT | 395 RXCMD_IE_END | 396 RXCMD_IE_GOOD; 397 398 /* 399 * Set the receive filter -- this will add some bits to the 400 * prototype RXCMD register. Do this before setting the 401 * transmit config register, since we might need to switch 402 * banks. 403 */ 404 sq_set_filter(sc); 405 406 /* Set up Seeq transmit command register */ 407 sq_seeq_write(sc, SEEQ_TXCMD, TXCMD_IE_UFLOW | 408 TXCMD_IE_COLL | 409 TXCMD_IE_16COLL | 410 TXCMD_IE_GOOD); 411 412 /* Now write the receive command register. 
*/ 413 sq_seeq_write(sc, SEEQ_RXCMD, sc->sc_rxcmd); 414 415 /* 416 * Set up HPC ethernet PIO and DMA configurations. 417 * 418 * The PROM appears to do most of this for the onboard HPC3, but 419 * not for the Challenge S's IOPLUS chip. We copy how the onboard 420 * chip is configured and assume that it's correct for both. 421 */ 422 if (sc->hpc_regs->revision == 3) { 423 uint32_t dmareg, pioreg; 424 425 pioreg = HPC3_ENETR_PIOCFG_P1(1) | 426 HPC3_ENETR_PIOCFG_P2(6) | 427 HPC3_ENETR_PIOCFG_P3(1); 428 429 dmareg = HPC3_ENETR_DMACFG_D1(6) | 430 HPC3_ENETR_DMACFG_D2(2) | 431 HPC3_ENETR_DMACFG_D3(0) | 432 HPC3_ENETR_DMACFG_FIX_RXDC | 433 HPC3_ENETR_DMACFG_FIX_INTR | 434 HPC3_ENETR_DMACFG_FIX_EOP | 435 HPC3_ENETR_DMACFG_TIMEOUT; 436 437 sq_hpc_write(sc, HPC3_ENETR_PIOCFG, pioreg); 438 sq_hpc_write(sc, HPC3_ENETR_DMACFG, dmareg); 439 } 440 441 /* Pass the start of the receive ring to the HPC */ 442 sq_hpc_write(sc, sc->hpc_regs->enetr_ndbp, SQ_CDRXADDR(sc, 0)); 443 444 /* And turn on the HPC ethernet receive channel */ 445 sq_hpc_write(sc, sc->hpc_regs->enetr_ctl, 446 sc->hpc_regs->enetr_ctl_active); 447 448 /* 449 * Turn off delayed receive interrupts on HPC1. 450 * (see Hollywood HPC Specification 2.1.4.3) 451 */ 452 if (sc->hpc_regs->revision != 3) 453 sq_hpc_write(sc, HPC1_ENET_INTDELAY, HPC1_ENET_INTDELAY_OFF); 454 455 ifp->if_flags |= IFF_RUNNING; 456 ifp->if_flags &= ~IFF_OACTIVE; 457 458 return 0; 459} 460 461static void 462sq_set_filter(struct sq_softc *sc) 463{ 464 struct ethercom *ec = &sc->sc_ethercom; 465 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 466 struct ether_multi *enm; 467 struct ether_multistep step; 468 469 /* 470 * Check for promiscuous mode. Also implies 471 * all-multicast. 472 */ 473 if (ifp->if_flags & IFF_PROMISC) { 474 sc->sc_rxcmd |= RXCMD_REC_ALL; 475 ifp->if_flags |= IFF_ALLMULTI; 476 return; 477 } 478 479 /* 480 * The 8003 has no hash table. 
If we have any multicast 481 * addresses on the list, enable reception of all multicast 482 * frames. 483 * 484 * XXX The 80c03 has a hash table. We should use it. 485 */ 486 487 ETHER_FIRST_MULTI(step, ec, enm); 488 489 if (enm == NULL) { 490 sc->sc_rxcmd &= ~RXCMD_REC_MASK; 491 sc->sc_rxcmd |= RXCMD_REC_BROAD; 492 493 ifp->if_flags &= ~IFF_ALLMULTI; 494 return; 495 } 496 497 sc->sc_rxcmd |= RXCMD_REC_MULTI; 498 ifp->if_flags |= IFF_ALLMULTI; 499} 500 501int 502sq_ioctl(struct ifnet *ifp, u_long cmd, void *data) 503{ 504 int s, error = 0; 505 506 SQ_TRACE(SQ_IOCTL, (struct sq_softc *)ifp->if_softc, 0, 0); 507 508 s = splnet(); 509 510 error = ether_ioctl(ifp, cmd, data); 511 if (error == ENETRESET) { 512 /* 513 * Multicast list has changed; set the hardware filter 514 * accordingly. 515 */ 516 if (ifp->if_flags & IFF_RUNNING) 517 error = sq_init(ifp); 518 else 519 error = 0; 520 } 521 522 splx(s); 523 return (error); 524} 525 526void 527sq_start(struct ifnet *ifp) 528{ 529 struct sq_softc *sc = ifp->if_softc; 530 uint32_t status; 531 struct mbuf *m0, *m; 532 bus_dmamap_t dmamap; 533 int err, totlen, nexttx, firsttx, lasttx = -1, ofree, seg; 534 535 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) 536 return; 537 538 /* 539 * Remember the previous number of free descriptors and 540 * the first descriptor we'll use. 541 */ 542 ofree = sc->sc_nfreetx; 543 firsttx = sc->sc_nexttx; 544 545 /* 546 * Loop through the send queue, setting up transmit descriptors 547 * until we drain the queue, or use up all available transmit 548 * descriptors. 549 */ 550 while (sc->sc_nfreetx != 0) { 551 /* 552 * Grab a packet off the queue. 553 */ 554 IFQ_POLL(&ifp->if_snd, m0); 555 if (m0 == NULL) 556 break; 557 m = NULL; 558 559 dmamap = sc->sc_txmap[sc->sc_nexttx]; 560 561 /* 562 * Load the DMA map. If this fails, the packet either 563 * didn't fit in the alloted number of segments, or we were 564 * short on resources. In this case, we'll copy and try 565 * again. 
566 * Also copy it if we need to pad, so that we are sure there 567 * is room for the pad buffer. 568 * XXX the right way of doing this is to use a static buffer 569 * for padding and adding it to the transmit descriptor (see 570 * sys/dev/pci/if_tl.c for example). We can't do this here yet 571 * because we can't send packets with more than one fragment. 572 */ 573 if (m0->m_pkthdr.len < ETHER_PAD_LEN || 574 bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, 575 BUS_DMA_NOWAIT) != 0) { 576 MGETHDR(m, M_DONTWAIT, MT_DATA); 577 if (m == NULL) { 578 printf("%s: unable to allocate Tx mbuf\n", 579 device_xname(sc->sc_dev)); 580 break; 581 } 582 if (m0->m_pkthdr.len > MHLEN) { 583 MCLGET(m, M_DONTWAIT); 584 if ((m->m_flags & M_EXT) == 0) { 585 printf("%s: unable to allocate Tx " 586 "cluster\n", 587 device_xname(sc->sc_dev)); 588 m_freem(m); 589 break; 590 } 591 } 592 593 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *)); 594 if (m0->m_pkthdr.len < ETHER_PAD_LEN) { 595 memset(mtod(m, char *) + m0->m_pkthdr.len, 0, 596 ETHER_PAD_LEN - m0->m_pkthdr.len); 597 m->m_pkthdr.len = m->m_len = ETHER_PAD_LEN; 598 } else 599 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len; 600 601 if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, 602 m, BUS_DMA_NOWAIT)) != 0) { 603 printf("%s: unable to load Tx buffer, " 604 "error = %d\n", 605 device_xname(sc->sc_dev), err); 606 break; 607 } 608 } 609 610 /* 611 * Ensure we have enough descriptors free to describe 612 * the packet. 613 */ 614 if (dmamap->dm_nsegs > sc->sc_nfreetx) { 615 /* 616 * Not enough free descriptors to transmit this 617 * packet. We haven't committed to anything yet, 618 * so just unload the DMA map, put the packet 619 * back on the queue, and punt. Notify the upper 620 * layer that there are no more slots left. 621 * 622 * XXX We could allocate an mbuf and copy, but 623 * XXX it is worth it? 
624 */ 625 ifp->if_flags |= IFF_OACTIVE; 626 bus_dmamap_unload(sc->sc_dmat, dmamap); 627 if (m != NULL) 628 m_freem(m); 629 break; 630 } 631 632 IFQ_DEQUEUE(&ifp->if_snd, m0); 633 /* 634 * Pass the packet to any BPF listeners. 635 */ 636 bpf_mtap(ifp, m0); 637 if (m != NULL) { 638 m_freem(m0); 639 m0 = m; 640 } 641 642 /* 643 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. 644 */ 645 646 SQ_TRACE(SQ_ENQUEUE, sc, sc->sc_nexttx, 0); 647 648 /* Sync the DMA map. */ 649 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, 650 BUS_DMASYNC_PREWRITE); 651 652 /* 653 * Initialize the transmit descriptors. 654 */ 655 for (nexttx = sc->sc_nexttx, seg = 0, totlen = 0; 656 seg < dmamap->dm_nsegs; 657 seg++, nexttx = SQ_NEXTTX(nexttx)) { 658 if (sc->hpc_regs->revision == 3) { 659 sc->sc_txdesc[nexttx].hpc3_hdd_bufptr = 660 dmamap->dm_segs[seg].ds_addr; 661 sc->sc_txdesc[nexttx].hpc3_hdd_ctl = 662 dmamap->dm_segs[seg].ds_len; 663 } else { 664 sc->sc_txdesc[nexttx].hpc1_hdd_bufptr = 665 dmamap->dm_segs[seg].ds_addr; 666 sc->sc_txdesc[nexttx].hpc1_hdd_ctl = 667 dmamap->dm_segs[seg].ds_len; 668 } 669 sc->sc_txdesc[nexttx].hdd_descptr= 670 SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx)); 671 lasttx = nexttx; 672 totlen += dmamap->dm_segs[seg].ds_len; 673 } 674 675 /* Last descriptor gets end-of-packet */ 676 KASSERT(lasttx != -1); 677 if (sc->hpc_regs->revision == 3) 678 sc->sc_txdesc[lasttx].hpc3_hdd_ctl |= 679 HPC3_HDD_CTL_EOPACKET; 680 else 681 sc->sc_txdesc[lasttx].hpc1_hdd_ctl |= 682 HPC1_HDD_CTL_EOPACKET; 683 684 SQ_DPRINTF(("%s: transmit %d-%d, len %d\n", 685 device_xname(sc->sc_dev), 686 sc->sc_nexttx, lasttx, 687 totlen)); 688 689 if (ifp->if_flags & IFF_DEBUG) { 690 printf(" transmit chain:\n"); 691 for (seg = sc->sc_nexttx;; seg = SQ_NEXTTX(seg)) { 692 printf(" descriptor %d:\n", seg); 693 printf(" hdd_bufptr: 0x%08x\n", 694 (sc->hpc_regs->revision == 3) ? 
695 sc->sc_txdesc[seg].hpc3_hdd_bufptr : 696 sc->sc_txdesc[seg].hpc1_hdd_bufptr); 697 printf(" hdd_ctl: 0x%08x\n", 698 (sc->hpc_regs->revision == 3) ? 699 sc->sc_txdesc[seg].hpc3_hdd_ctl: 700 sc->sc_txdesc[seg].hpc1_hdd_ctl); 701 printf(" hdd_descptr: 0x%08x\n", 702 sc->sc_txdesc[seg].hdd_descptr); 703 704 if (seg == lasttx) 705 break; 706 } 707 } 708 709 /* Sync the descriptors we're using. */ 710 SQ_CDTXSYNC(sc, sc->sc_nexttx, dmamap->dm_nsegs, 711 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 712 713 /* Store a pointer to the packet so we can free it later */ 714 sc->sc_txmbuf[sc->sc_nexttx] = m0; 715 716 /* Advance the tx pointer. */ 717 sc->sc_nfreetx -= dmamap->dm_nsegs; 718 sc->sc_nexttx = nexttx; 719 } 720 721 /* All transmit descriptors used up, let upper layers know */ 722 if (sc->sc_nfreetx == 0) 723 ifp->if_flags |= IFF_OACTIVE; 724 725 if (sc->sc_nfreetx != ofree) { 726 SQ_DPRINTF(("%s: %d packets enqueued, first %d, INTR on %d\n", 727 device_xname(sc->sc_dev), lasttx - firsttx + 1, 728 firsttx, lasttx)); 729 730 /* 731 * Cause a transmit interrupt to happen on the 732 * last packet we enqueued, mark it as the last 733 * descriptor. 734 * 735 * HPC1_HDD_CTL_INTR will generate an interrupt on 736 * HPC1. HPC3 requires HPC3_HDD_CTL_EOPACKET in 737 * addition to HPC3_HDD_CTL_INTR to interrupt. 738 */ 739 KASSERT(lasttx != -1); 740 if (sc->hpc_regs->revision == 3) { 741 sc->sc_txdesc[lasttx].hpc3_hdd_ctl |= 742 HPC3_HDD_CTL_INTR | HPC3_HDD_CTL_EOCHAIN; 743 } else { 744 sc->sc_txdesc[lasttx].hpc1_hdd_ctl |= HPC1_HDD_CTL_INTR; 745 sc->sc_txdesc[lasttx].hpc1_hdd_bufptr |= 746 HPC1_HDD_CTL_EOCHAIN; 747 } 748 749 SQ_CDTXSYNC(sc, lasttx, 1, 750 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 751 752 /* 753 * There is a potential race condition here if the HPC 754 * DMA channel is active and we try and either update 755 * the 'next descriptor' pointer in the HPC PIO space 756 * or the 'next descriptor' pointer in a previous desc- 757 * riptor. 
758 * 759 * To avoid this, if the channel is active, we rely on 760 * the transmit interrupt routine noticing that there 761 * are more packets to send and restarting the HPC DMA 762 * engine, rather than mucking with the DMA state here. 763 */ 764 status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl); 765 766 if ((status & sc->hpc_regs->enetx_ctl_active) != 0) { 767 SQ_TRACE(SQ_ADD_TO_DMA, sc, firsttx, status); 768 769 /* 770 * NB: hpc3_hdd_ctl == hpc1_hdd_bufptr, and 771 * HPC1_HDD_CTL_EOCHAIN == HPC3_HDD_CTL_EOCHAIN 772 */ 773 sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc3_hdd_ctl &= 774 ~HPC3_HDD_CTL_EOCHAIN; 775 776 if (sc->hpc_regs->revision != 3) 777 sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc1_hdd_ctl 778 &= ~HPC1_HDD_CTL_INTR; 779 780 SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx), 1, 781 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 782 } else if (sc->hpc_regs->revision == 3) { 783 SQ_TRACE(SQ_START_DMA, sc, firsttx, status); 784 785 sq_hpc_write(sc, HPC3_ENETX_NDBP, SQ_CDTXADDR(sc, 786 firsttx)); 787 788 /* Kick DMA channel into life */ 789 sq_hpc_write(sc, HPC3_ENETX_CTL, HPC3_ENETX_CTL_ACTIVE); 790 } else { 791 /* 792 * In the HPC1 case where transmit DMA is 793 * inactive, we can either kick off if 794 * the ring was previously empty, or call 795 * our transmit interrupt handler to 796 * figure out if the ring stopped short 797 * and restart at the right place. 798 */ 799 if (ofree == SQ_NTXDESC) { 800 SQ_TRACE(SQ_START_DMA, sc, firsttx, status); 801 802 sq_hpc_write(sc, HPC1_ENETX_NDBP, 803 SQ_CDTXADDR(sc, firsttx)); 804 sq_hpc_write(sc, HPC1_ENETX_CFXBP, 805 SQ_CDTXADDR(sc, firsttx)); 806 sq_hpc_write(sc, HPC1_ENETX_CBP, 807 SQ_CDTXADDR(sc, firsttx)); 808 809 /* Kick DMA channel into life */ 810 sq_hpc_write(sc, HPC1_ENETX_CTL, 811 HPC1_ENETX_CTL_ACTIVE); 812 } else 813 sq_txring_hpc1(sc); 814 } 815 816 /* Set a watchdog timer in case the chip flakes out. 
*/ 817 ifp->if_timer = 5; 818 } 819} 820 821void 822sq_stop(struct ifnet *ifp, int disable) 823{ 824 int i; 825 struct sq_softc *sc = ifp->if_softc; 826 827 for (i =0; i < SQ_NTXDESC; i++) { 828 if (sc->sc_txmbuf[i] != NULL) { 829 bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]); 830 m_freem(sc->sc_txmbuf[i]); 831 sc->sc_txmbuf[i] = NULL; 832 } 833 } 834 835 /* Clear Seeq transmit/receive command registers */ 836 sq_seeq_write(sc, SEEQ_TXCMD, 0); 837 sq_seeq_write(sc, SEEQ_RXCMD, 0); 838 839 sq_reset(sc); 840 841 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 842 ifp->if_timer = 0; 843} 844 845/* Device timeout/watchdog routine. */ 846void 847sq_watchdog(struct ifnet *ifp) 848{ 849 uint32_t status; 850 struct sq_softc *sc = ifp->if_softc; 851 852 status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl); 853 log(LOG_ERR, "%s: device timeout (prev %d, next %d, free %d, " 854 "status %08x)\n", device_xname(sc->sc_dev), sc->sc_prevtx, 855 sc->sc_nexttx, sc->sc_nfreetx, status); 856 857 sq_trace_dump(sc); 858 859 memset(&sc->sq_trace, 0, sizeof(sc->sq_trace)); 860 sc->sq_trace_idx = 0; 861 862 ++ifp->if_oerrors; 863 864 sq_init(ifp); 865} 866 867static void 868sq_trace_dump(struct sq_softc *sc) 869{ 870 int i; 871 const char *act; 872 873 for (i = 0; i < sc->sq_trace_idx; i++) { 874 switch (sc->sq_trace[i].action) { 875 case SQ_RESET: act = "SQ_RESET"; break; 876 case SQ_ADD_TO_DMA: act = "SQ_ADD_TO_DMA"; break; 877 case SQ_START_DMA: act = "SQ_START_DMA"; break; 878 case SQ_DONE_DMA: act = "SQ_DONE_DMA"; break; 879 case SQ_RESTART_DMA: act = "SQ_RESTART_DMA"; break; 880 case SQ_TXINTR_ENTER: act = "SQ_TXINTR_ENTER"; break; 881 case SQ_TXINTR_EXIT: act = "SQ_TXINTR_EXIT"; break; 882 case SQ_TXINTR_BUSY: act = "SQ_TXINTR_BUSY"; break; 883 case SQ_IOCTL: act = "SQ_IOCTL"; break; 884 case SQ_ENQUEUE: act = "SQ_ENQUEUE"; break; 885 default: act = "UNKNOWN"; 886 } 887 888 printf("%s: [%03d] action %-16s buf %03d free %03d " 889 "status %08x line %d\n", device_xname(sc->sc_dev), i, 
act, 890 sc->sq_trace[i].bufno, sc->sq_trace[i].freebuf, 891 sc->sq_trace[i].status, sc->sq_trace[i].line); 892 } 893} 894 895static int 896sq_intr(void *arg) 897{ 898 struct sq_softc *sc = arg; 899 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 900 int handled = 0; 901 uint32_t stat; 902 903 stat = sq_hpc_read(sc, sc->hpc_regs->enetr_reset); 904 905 if ((stat & 2) == 0) 906 SQ_DPRINTF(("%s: Unexpected interrupt!\n", 907 device_xname(sc->sc_dev))); 908 else 909 sq_hpc_write(sc, sc->hpc_regs->enetr_reset, (stat | 2)); 910 911 /* 912 * If the interface isn't running, the interrupt couldn't 913 * possibly have come from us. 914 */ 915 if ((ifp->if_flags & IFF_RUNNING) == 0) 916 return 0; 917 918 sc->sq_intrcnt.ev_count++; 919 920 /* Always check for received packets */ 921 if (sq_rxintr(sc) != 0) 922 handled++; 923 924 /* Only handle transmit interrupts if we actually sent something */ 925 if (sc->sc_nfreetx < SQ_NTXDESC) { 926 sq_txintr(sc); 927 handled++; 928 } 929 930#if NRND > 0 931 if (handled) 932 rnd_add_uint32(&sc->rnd_source, stat); 933#endif 934 return (handled); 935} 936 937static int 938sq_rxintr(struct sq_softc *sc) 939{ 940 int count = 0; 941 struct mbuf* m; 942 int i, framelen; 943 uint8_t pktstat; 944 uint32_t status; 945 uint32_t ctl_reg; 946 int new_end, orig_end; 947 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 948 949 for (i = sc->sc_nextrx;; i = SQ_NEXTRX(i)) { 950 SQ_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD | 951 BUS_DMASYNC_POSTWRITE); 952 953 /* 954 * If this is a CPU-owned buffer, we're at the end of the list. 
	 */
	if (sc->hpc_regs->revision == 3)
		ctl_reg = sc->sc_rxdesc[i].hpc3_hdd_ctl &
		    HPC3_HDD_CTL_OWN;
	else
		ctl_reg = sc->sc_rxdesc[i].hpc1_hdd_ctl &
		    HPC1_HDD_CTL_OWN;

	/* Descriptor still owned by the HPC: nothing more to reap. */
	if (ctl_reg) {
#if defined(SQ_DEBUG)
		uint32_t reg;

		reg = sq_hpc_read(sc, sc->hpc_regs->enetr_ctl);
		SQ_DPRINTF(("%s: rxintr: done at %d (ctl %08x)\n",
		    device_xname(sc->sc_dev), i, reg));
#endif
		break;
	}

	count++;

	/*
	 * Frame length: buffer size minus the residual byte count the
	 * HPC left in the descriptor, minus 3.
	 * NOTE(review): the "- 3" presumably accounts for trailing
	 * status/pad bytes appended by the chip -- confirm against the
	 * Seeq 8003 / HPC documentation.
	 */
	m = sc->sc_rxmbuf[i];
	framelen = m->m_ext.ext_size - 3;
	if (sc->hpc_regs->revision == 3)
		framelen -=
		    HPC3_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc3_hdd_ctl);
	else
		framelen -=
		    HPC1_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc1_hdd_ctl);

	/* Now sync the actual packet data */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
	    sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);

	/*
	 * Per-packet receive status byte stored in the buffer just
	 * past the packet data (the data itself starts at offset 2,
	 * see the m_data += 2 below).
	 */
	pktstat = *((uint8_t *)m->m_data + framelen + 2);

	if ((pktstat & RXSTAT_GOOD) == 0) {
		/* Bad packet: count it, recycle the descriptor in place. */
		ifp->if_ierrors++;

		if (pktstat & RXSTAT_OFLOW)
			printf("%s: receive FIFO overflow\n",
			    device_xname(sc->sc_dev));

		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
		    sc->sc_rxmap[i]->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		SQ_INIT_RXDESC(sc, i);
		SQ_DPRINTF(("%s: sq_rxintr: buf %d no RXSTAT_GOOD\n",
		    device_xname(sc->sc_dev), i));
		continue;
	}

	/*
	 * Replace the mbuf we are about to pass up the stack; on
	 * failure, drop the packet and reuse the old buffer.
	 */
	if (sq_add_rxbuf(sc, i) != 0) {
		ifp->if_ierrors++;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
		    sc->sc_rxmap[i]->dm_mapsize,
		    BUS_DMASYNC_PREREAD);
		SQ_INIT_RXDESC(sc, i);
		SQ_DPRINTF(("%s: sq_rxintr: buf %d sq_add_rxbuf() "
		    "failed\n", device_xname(sc->sc_dev), i));
		continue;
	}

	/* Skip the 2-byte pad so the payload is where if_input expects it. */
	m->m_data += 2;
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = m->m_len = framelen;

	ifp->if_ipackets++;

	SQ_DPRINTF(("%s: sq_rxintr: buf %d len %d\n",
	    device_xname(sc->sc_dev), i, framelen));

	/* Tap BPF listeners, then hand the packet to the stack. */
	bpf_mtap(ifp, m);
	(*ifp->if_input)(ifp, m);
	}

	/* If anything happened, move ring start/end pointers to new spot */
	if (i != sc->sc_nextrx) {
		/*
		 * NB: hpc3_hdd_ctl == hpc1_hdd_bufptr, and
		 * HPC1_HDD_CTL_EOCHAIN == HPC3_HDD_CTL_EOCHAIN
		 */

		/* Terminate the chain at the new ring end... */
		new_end = SQ_PREVRX(i);
		sc->sc_rxdesc[new_end].hpc3_hdd_ctl |= HPC3_HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, new_end, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);

		/* ...and re-link the old end back into the chain. */
		orig_end = SQ_PREVRX(sc->sc_nextrx);
		sc->sc_rxdesc[orig_end].hpc3_hdd_ctl &= ~HPC3_HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, orig_end, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);

		sc->sc_nextrx = i;
	}

	status = sq_hpc_read(sc, sc->hpc_regs->enetr_ctl);

	/* If receive channel is stopped, restart it... */
	if ((status & sc->hpc_regs->enetr_ctl_active) == 0) {
		/* Pass the start of the receive ring to the HPC */
		sq_hpc_write(sc, sc->hpc_regs->enetr_ndbp, SQ_CDRXADDR(sc,
		    sc->sc_nextrx));

		/* And turn on the HPC ethernet receive channel */
		sq_hpc_write(sc, sc->hpc_regs->enetr_ctl,
		    sc->hpc_regs->enetr_ctl_active);
	}

	return count;
}

/*
 * Transmit interrupt handler: record error/collision statistics from
 * the transmit status, reclaim completed descriptors via the
 * HPC-revision-specific ring walker, and restart output.
 * Returns 1 (interrupt handled).
 */
static int
sq_txintr(struct sq_softc *sc)
{
	int shift = 0;
	uint32_t status, tmp;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/*
	 * On HPC1 the transmit status lives in the upper 16 bits of
	 * the control register; shift it down so the TXSTAT_* bits
	 * line up the same way as on HPC3.
	 */
	if (sc->hpc_regs->revision != 3)
		shift = 16;

	status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl) >> shift;

	SQ_TRACE(SQ_TXINTR_ENTER, sc, sc->sc_prevtx, status);

	/* Only examine error bits when DMA is idle and TX wasn't good. */
	tmp = (sc->hpc_regs->enetx_ctl_active >> shift) | TXSTAT_GOOD;
	if ((status & tmp) == 0) {
		if (status & TXSTAT_COLL)
			ifp->if_collisions++;

		if (status & TXSTAT_UFLOW) {
			printf("%s: transmit underflow\n",
			    device_xname(sc->sc_dev));
			ifp->if_oerrors++;
		}

		if (status & TXSTAT_16COLL) {
			printf("%s: max collisions reached\n",
			    device_xname(sc->sc_dev));
			ifp->if_oerrors++;
			ifp->if_collisions += 16;
		}
	}

	/* prevtx now points to next xmit packet not yet finished */
	if (sc->hpc_regs->revision == 3)
		sq_txring_hpc3(sc);
	else
		sq_txring_hpc1(sc);

	/* If we have buffers free, let upper layers know */
	if (sc->sc_nfreetx > 0)
		ifp->if_flags &= ~IFF_OACTIVE;

	/* If all packets have left the coop, cancel watchdog */
	if (sc->sc_nfreetx == SQ_NTXDESC)
		ifp->if_timer = 0;

	SQ_TRACE(SQ_TXINTR_EXIT, sc, sc->sc_prevtx, status);
	sq_start(ifp);

	return 1;
}

/*
 * Reclaim used transmit descriptors and restart the transmit DMA
 * engine if necessary.
 */
static void
sq_txring_hpc1(struct sq_softc *sc)
{
	/*
	 * HPC1 doesn't tag transmitted descriptors, however,
	 * the NDBP register points to the next descriptor that
	 * has not yet been processed. If DMA is not in progress,
	 * we can safely reclaim all descriptors up to NDBP, and,
	 * if necessary, restart DMA at NDBP. Otherwise, if DMA
	 * is active, we can only safely reclaim up to CBP.
	 *
	 * For now, we'll only reclaim on inactive DMA and assume
	 * that a sufficiently large ring keeps us out of trouble.
	 */
	uint32_t reclaimto, status;
	int reclaimall, i = sc->sc_prevtx;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* DMA still running: not safe to reclaim anything yet. */
	status = sq_hpc_read(sc, HPC1_ENETX_CTL);
	if (status & HPC1_ENETX_CTL_ACTIVE) {
		SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status);
		return;
	} else
		reclaimto = sq_hpc_read(sc, HPC1_ENETX_NDBP);

	/*
	 * If the ring is completely full and NDBP already points at
	 * prevtx, every descriptor has been transmitted -- reclaim
	 * the whole ring rather than stopping at reclaimto at once.
	 */
	if (sc->sc_nfreetx == 0 && SQ_CDTXADDR(sc, i) == reclaimto)
		reclaimall = 1;
	else
		reclaimall = 0;

	/* Walk from prevtx up to (but not including) reclaimto. */
	while (sc->sc_nfreetx < SQ_NTXDESC) {
		if (SQ_CDTXADDR(sc, i) == reclaimto && !reclaimall)
			break;

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
		    sc->sc_txmap[i]->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, sc, i, status);

		i = SQ_NEXTTX(i);
	}

	/* Descriptors remain queued: restart DMA at the first of them. */
	if (sc->sc_nfreetx < SQ_NTXDESC) {
		SQ_TRACE(SQ_RESTART_DMA, sc, i, status);

		KASSERT(reclaimto == SQ_CDTXADDR(sc, i));

		sq_hpc_write(sc, HPC1_ENETX_CFXBP, reclaimto);
		sq_hpc_write(sc, HPC1_ENETX_CBP, reclaimto);

		/* Kick DMA channel into life */
		sq_hpc_write(sc, HPC1_ENETX_CTL, HPC1_ENETX_CTL_ACTIVE);

		/*
		 * Set a watchdog timer in case the chip
		 * flakes out.
		 */
		ifp->if_timer = 5;
	}

	sc->sc_prevtx = i;
}

/*
 * Reclaim used transmit descriptors and restart the transmit DMA
 * engine if necessary.
 */
static void
sq_txring_hpc3(struct sq_softc *sc)
{
	/*
	 * HPC3 tags descriptors with a bit once they've been
	 * transmitted. We need only free each XMITDONE'd
	 * descriptor, and restart the DMA engine if any
	 * descriptors are left over.
	 */
	int i;
	uint32_t status = 0;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	i = sc->sc_prevtx;
	while (sc->sc_nfreetx < SQ_NTXDESC) {
		/*
		 * Check status first so we don't end up with a case of
		 * the buffer not being finished while the DMA channel
		 * has gone idle.
		 */
		status = sq_hpc_read(sc, HPC3_ENETX_CTL);

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* Check for used descriptor and restart DMA chain if needed */
		if (!(sc->sc_txdesc[i].hpc3_hdd_ctl & HPC3_HDD_CTL_XMITDONE)) {
			if ((status & HPC3_ENETX_CTL_ACTIVE) == 0) {
				SQ_TRACE(SQ_RESTART_DMA, sc, i, status);

				sq_hpc_write(sc, HPC3_ENETX_NDBP,
				    SQ_CDTXADDR(sc, i));

				/* Kick DMA channel into life */
				sq_hpc_write(sc, HPC3_ENETX_CTL,
				    HPC3_ENETX_CTL_ACTIVE);

				/*
				 * Set a watchdog timer in case the chip
				 * flakes out.
1244 */ 1245 ifp->if_timer = 5; 1246 } else 1247 SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status); 1248 break; 1249 } 1250 1251 /* Sync the packet data, unload DMA map, free mbuf */ 1252 bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0, 1253 sc->sc_txmap[i]->dm_mapsize, 1254 BUS_DMASYNC_POSTWRITE); 1255 bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]); 1256 m_freem(sc->sc_txmbuf[i]); 1257 sc->sc_txmbuf[i] = NULL; 1258 1259 ifp->if_opackets++; 1260 sc->sc_nfreetx++; 1261 1262 SQ_TRACE(SQ_DONE_DMA, sc, i, status); 1263 i = SQ_NEXTTX(i); 1264 } 1265 1266 sc->sc_prevtx = i; 1267} 1268 1269void 1270sq_reset(struct sq_softc *sc) 1271{ 1272 /* Stop HPC dma channels */ 1273 sq_hpc_write(sc, sc->hpc_regs->enetr_ctl, 0); 1274 sq_hpc_write(sc, sc->hpc_regs->enetx_ctl, 0); 1275 1276 sq_hpc_write(sc, sc->hpc_regs->enetr_reset, 3); 1277 delay(20); 1278 sq_hpc_write(sc, sc->hpc_regs->enetr_reset, 0); 1279} 1280 1281/* sq_add_rxbuf: Add a receive buffer to the indicated descriptor. */ 1282int 1283sq_add_rxbuf(struct sq_softc *sc, int idx) 1284{ 1285 int err; 1286 struct mbuf *m; 1287 1288 MGETHDR(m, M_DONTWAIT, MT_DATA); 1289 if (m == NULL) 1290 return (ENOBUFS); 1291 1292 MCLGET(m, M_DONTWAIT); 1293 if ((m->m_flags & M_EXT) == 0) { 1294 m_freem(m); 1295 return (ENOBUFS); 1296 } 1297 1298 if (sc->sc_rxmbuf[idx] != NULL) 1299 bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[idx]); 1300 1301 sc->sc_rxmbuf[idx] = m; 1302 1303 if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap[idx], 1304 m->m_ext.ext_buf, m->m_ext.ext_size, 1305 NULL, BUS_DMA_NOWAIT)) != 0) { 1306 printf("%s: can't load rx DMA map %d, error = %d\n", 1307 device_xname(sc->sc_dev), idx, err); 1308 panic("sq_add_rxbuf"); /* XXX */ 1309 } 1310 1311 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[idx], 0, 1312 sc->sc_rxmap[idx]->dm_mapsize, BUS_DMASYNC_PREREAD); 1313 1314 SQ_INIT_RXDESC(sc, idx); 1315 1316 return 0; 1317} 1318 1319void 1320sq_dump_buffer(paddr_t addr, psize_t len) 1321{ 1322 u_int i; 1323 u_char* physaddr = (char*) 
MIPS_PHYS_TO_KSEG1(addr); 1324 1325 if (len == 0) 1326 return; 1327 1328 printf("%p: ", physaddr); 1329 1330 for (i = 0; i < len; i++) { 1331 printf("%02x ", *(physaddr + i) & 0xff); 1332 if ((i % 16) == 15 && i != len - 1) 1333 printf("\n%p: ", physaddr + i); 1334 } 1335 1336 printf("\n"); 1337} 1338