/*	$NetBSD: if_sq.c,v 1.59 2024/06/29 12:11:11 riastradh Exp $	*/

/*
 * Copyright (c) 2001 Rafal K. Boni
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Portions of this code are derived from software contributed to The
 * NetBSD Foundation by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_sq.c,v 1.59 2024/06/29 12:11:11 riastradh Exp $");


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#include <machine/endian.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <sys/bus.h>
#include <machine/intr.h>
#include <machine/sysconf.h>

#include <dev/ic/seeq8003reg.h>

#include <sgimips/hpc/sqvar.h>
#include <sgimips/hpc/hpcvar.h>
#include <sgimips/hpc/hpcreg.h>

#include <dev/arcbios/arcbios.h>
#include <dev/arcbios/arcbiosvar.h>
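/*
 * XXX The "#define static" below compiles the static qualifier away,
 * presumably so the driver's internal functions and data remain
 * externally visible (e.g. to the kernel debugger).
 */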
#define static

/*
 * Short TODO list:
 *	(1) Do counters for bad-RX packets.
 *	(2) Allow multi-segment transmits, instead of copying to a single,
 *	    contiguous mbuf.
 *	(3) Verify sq_stop() turns off enough stuff; I was still getting
 *	    seeq interrupts after sq_stop().
 *	(4) Implement EDLC modes: especially packet auto-pad and simplex
 *	    mode.
 *	(5) Should the driver filter out its own transmissions in non-EDLC
 *	    mode?
 *	(6) Multicast support -- multicast filter, address management, ...
 *	(7) Deal with RB0 (recv buffer overflow) on reception.  Will need
 *	    to figure out if RB0 is read-only as stated in one spot in the
 *	    HPC spec or read-write (ie, is the 'write a one to clear it')
 *	    the correct thing?
 */

#if defined(SQ_DEBUG)
	int sq_debug = 0;
	#define SQ_DPRINTF(x)	if (sq_debug) printf x
#else
	#define SQ_DPRINTF(x)
#endif

static int	sq_match(device_t, cfdata_t, void *);
static void	sq_attach(device_t, device_t, void *);
static int	sq_init(struct ifnet *);
static void	sq_start(struct ifnet *);
static void	sq_stop(struct ifnet *, int);
static void	sq_watchdog(struct ifnet *);
static int	sq_ioctl(struct ifnet *, u_long, void *);

static void	sq_set_filter(struct sq_softc *);
static int	sq_intr(void *);
static int	sq_rxintr(struct sq_softc *);
static int	sq_txintr(struct sq_softc *);
static void	sq_txring_hpc1(struct sq_softc *);
static void	sq_txring_hpc3(struct sq_softc *);
static void	sq_reset(struct sq_softc *);
static int	sq_add_rxbuf(struct sq_softc *, int);
static void	sq_dump_buffer(paddr_t, psize_t);
static void	sq_trace_dump(struct sq_softc *);

CFATTACH_DECL_NEW(sq, sizeof(struct sq_softc),
    sq_match, sq_attach, NULL, NULL);

#define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
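/*
 * XXX Register access wrappers: the Seeq registers are byte-wide and
 * spaced one 32-bit word apart ("off << 2"), with the significant byte
 * apparently sitting in the last byte lane of each word (the "+ 3");
 * the HPC registers are accessed as plain 32-bit words.
 */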
#define sq_seeq_read(sc, off) \
	bus_space_read_1(sc->sc_regt, sc->sc_regh, (off << 2) + 3)
#define sq_seeq_write(sc, off, val) \
	bus_space_write_1(sc->sc_regt, sc->sc_regh, (off << 2) + 3, val)

#define sq_hpc_read(sc, off) \
	bus_space_read_4(sc->sc_hpct, sc->sc_hpch, off)
#define sq_hpc_write(sc, off, val) \
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, off, val)

/* MAC address offset for non-onboard implementations */
#define SQ_HPC_EEPROM_ENADDR	250

#define SGI_OUI_0	0x08
#define SGI_OUI_1	0x00
#define SGI_OUI_2	0x69

static int
sq_match(device_t parent, cfdata_t cf, void *aux)
{
	struct hpc_attach_args *ha = aux;

	if (strcmp(ha->ha_name, cf->cf_name) == 0) {
		vaddr_t reset, txstat;

		reset = MIPS_PHYS_TO_KSEG1(ha->ha_sh +
		    ha->ha_dmaoff + ha->hpc_regs->enetr_reset);
		txstat = MIPS_PHYS_TO_KSEG1(ha->ha_sh +
		    ha->ha_devoff + (SEEQ_TXSTAT << 2));

		if (platform.badaddr((void *)reset, sizeof(reset)))
			return 0;

		*(volatile uint32_t *)reset = 0x1;
		delay(20);
		*(volatile uint32_t *)reset = 0x0;

		if (platform.badaddr((void *)txstat, sizeof(txstat)))
			return 0;

		if ((*(volatile uint32_t *)txstat & 0xff) == TXSTAT_OLDNEW)
			return 1;
	}

	return 0;
}

static void
sq_attach(device_t parent, device_t self, void *aux)
{
	int i, err;
	const char* macaddr;
	struct sq_softc *sc = device_private(self);
	struct hpc_attach_args *haa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	sc->sc_dev = self;
	sc->sc_hpct = haa->ha_st;
	sc->hpc_regs = haa->hpc_regs;	/* HPC register definitions */

	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
	    haa->ha_dmaoff, sc->hpc_regs->enet_regs_size,
	    &sc->sc_hpch)) != 0) {
		printf(": unable to map HPC DMA registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_regt = haa->ha_st;
	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
	    haa->ha_devoff, sc->hpc_regs->enet_devregs_size,
	    &sc->sc_regh)) != 0) {
		printf(": unable to map Seeq registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_dmat = haa->ha_dmat;

	if ((err = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sq_control),
	    PAGE_SIZE, PAGE_SIZE, &sc->sc_cdseg, 1, &sc->sc_ncdseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n", err);
		goto fail_0;
	}

	if ((err = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg,
	    sizeof(struct sq_control), (void **)&sc->sc_control,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", err);
		goto fail_1;
	}

	if ((err = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct sq_control), 1, sizeof(struct sq_control), PAGE_SIZE,
	    BUS_DMA_NOWAIT, &sc->sc_cdmap)) != 0) {
		printf(": unable to create DMA map for control data, error "
		    "= %d\n", err);
		goto fail_2;
	}

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap,
	    sc->sc_control, sizeof(struct sq_control), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load DMA map for control data, error "
		    "= %d\n", err);
		goto fail_3;
	}

	memset(sc->sc_control, 0, sizeof(struct sq_control));

	/* Create transmit buffer DMA maps */
	for (i = 0; i < SQ_NTXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat,
		    MCLBYTES, 1, MCLBYTES, 0,
		    BUS_DMA_NOWAIT, &sc->sc_txmap[i])) != 0) {
			printf(": unable to create tx DMA map %d, error = %d\n",
			    i, err);
			goto fail_4;
		}
	}

	/* Create receive buffer DMA maps */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat,
		    MCLBYTES, 1, MCLBYTES, 0,
		    BUS_DMA_NOWAIT, &sc->sc_rxmap[i])) != 0) {
			printf(": unable to create rx DMA map %d, error = %d\n",
			    i, err);
			goto fail_5;
		}
	}

	/* Pre-allocate the receive buffers. */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = sq_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d\n,"
			    " error = %d\n", i, err);
			goto fail_6;
		}
	}

	memcpy(sc->sc_enaddr, &haa->hpc_eeprom[SQ_HPC_EEPROM_ENADDR],
	    ETHER_ADDR_LEN);

	/*
	 * If our mac address is bogus, obtain it from ARCBIOS. This will
	 * be true of the onboard HPC3 on IP22, since there is no eeprom,
	 * but rather the DS1386 RTC's battery-backed ram is used.
	 */
	if (sc->sc_enaddr[0] != SGI_OUI_0 ||
	    sc->sc_enaddr[1] != SGI_OUI_1 ||
	    sc->sc_enaddr[2] != SGI_OUI_2) {
		macaddr = arcbios_GetEnvironmentVariable("eaddr");
		if (macaddr == NULL) {
			printf(": unable to get MAC address!\n");
			goto fail_6;
		}
		ether_aton_r(sc->sc_enaddr, sizeof(sc->sc_enaddr), macaddr);
	}

	evcnt_attach_dynamic(&sc->sq_intrcnt, EVCNT_TYPE_INTR, NULL,
	    device_xname(self), "intr");

	if ((cpu_intr_establish(haa->ha_irq, IPL_NET, sq_intr, sc)) == NULL) {
		printf(": unable to establish interrupt!\n");
		goto fail_6;
	}

	/* Reset the chip to a known state. */
	sq_reset(sc);

	/*
	 * Determine if we're an 8003 or 80c03 by setting the first
	 * MAC address register to non-zero, and then reading it back.
	 * If it's zero, we have an 80c03, because we will have read
	 * the TxCollLSB register.
	 */
	sq_seeq_write(sc, SEEQ_TXCOLLS0, 0xa5);
	if (sq_seeq_read(sc, SEEQ_TXCOLLS0) == 0)
		sc->sc_type = SQ_TYPE_80C03;
	else
		sc->sc_type = SQ_TYPE_8003;
	sq_seeq_write(sc, SEEQ_TXCOLLS0, 0x00);

	printf(": SGI Seeq %s\n",
	    sc->sc_type == SQ_TYPE_80C03 ? "80c03" : "8003");

	printf("%s: Ethernet address %s\n",
	    device_xname(self), ether_sprintf(sc->sc_enaddr));

	strcpy(ifp->if_xname, device_xname(self));
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = sq_init;
	ifp->if_stop = sq_stop;
	ifp->if_start = sq_start;
	ifp->if_ioctl = sq_ioctl;
	ifp->if_watchdog = sq_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, sc->sc_enaddr);

	memset(&sc->sq_trace, 0, sizeof(sc->sq_trace));
	/* Done! */
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
 fail_5:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]);
	}
 fail_4:
	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat,
	    (void *)sc->sc_control, sizeof(struct sq_control));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg);
 fail_0:
	return;
}

/* Set up data to get the interface up and running. */
int
sq_init(struct ifnet *ifp)
{
	int i;
	struct sq_softc *sc = ifp->if_softc;

	/* Cancel any in-progress I/O */
	sq_stop(ifp, 0);

	sc->sc_nextrx = 0;

	sc->sc_nfreetx = SQ_NTXDESC;
	sc->sc_nexttx = sc->sc_prevtx = 0;

	SQ_TRACE(SQ_RESET, sc, 0, 0);

	/* Set into 8003 mode, bank 0 to program ethernet address */
	sq_seeq_write(sc, SEEQ_TXCMD, TXCMD_BANK0);

	/* Now write the address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sq_seeq_write(sc, i, sc->sc_enaddr[i]);

	sc->sc_rxcmd =
	    RXCMD_IE_CRC |
	    RXCMD_IE_DRIB |
	    RXCMD_IE_SHORT |
	    RXCMD_IE_END |
	    RXCMD_IE_GOOD;

	/*
	 * Set the receive filter -- this will add some bits to the
	 * prototype RXCMD register.  Do this before setting the
	 * transmit config register, since we might need to switch
	 * banks.
	 */
	sq_set_filter(sc);

	/* Set up Seeq transmit command register */
	sq_seeq_write(sc, SEEQ_TXCMD,
	    TXCMD_IE_UFLOW |
	    TXCMD_IE_COLL |
	    TXCMD_IE_16COLL |
	    TXCMD_IE_GOOD);

	/* Now write the receive command register. */
	sq_seeq_write(sc, SEEQ_RXCMD, sc->sc_rxcmd);

	/*
	 * Set up HPC ethernet PIO and DMA configurations.
	 *
	 * The PROM appears to do most of this for the onboard HPC3, but
	 * not for the Challenge S's IOPLUS chip. We copy how the onboard
	 * chip is configured and assume that it's correct for both.
	 */
	if (sc->hpc_regs->revision == 3) {
		uint32_t dmareg, pioreg;

		pioreg =
		    HPC3_ENETR_PIOCFG_P1(1) |
		    HPC3_ENETR_PIOCFG_P2(6) |
		    HPC3_ENETR_PIOCFG_P3(1);

		dmareg =
		    HPC3_ENETR_DMACFG_D1(6) |
		    HPC3_ENETR_DMACFG_D2(2) |
		    HPC3_ENETR_DMACFG_D3(0) |
		    HPC3_ENETR_DMACFG_FIX_RXDC |
		    HPC3_ENETR_DMACFG_FIX_INTR |
		    HPC3_ENETR_DMACFG_FIX_EOP |
		    HPC3_ENETR_DMACFG_TIMEOUT;

		sq_hpc_write(sc, HPC3_ENETR_PIOCFG, pioreg);
		sq_hpc_write(sc, HPC3_ENETR_DMACFG, dmareg);
	}

	/* Pass the start of the receive ring to the HPC */
	sq_hpc_write(sc, sc->hpc_regs->enetr_ndbp, SQ_CDRXADDR(sc, 0));

	/* And turn on the HPC ethernet receive channel */
	sq_hpc_write(sc, sc->hpc_regs->enetr_ctl,
	    sc->hpc_regs->enetr_ctl_active);

	/*
	 * Turn off delayed receive interrupts on HPC1.
	 * (see Hollywood HPC Specification 2.1.4.3)
	 */
	if (sc->hpc_regs->revision != 3)
		sq_hpc_write(sc, HPC1_ENET_INTDELAY, HPC1_ENET_INTDELAY_OFF);

	ifp->if_flags |= IFF_RUNNING;

	return 0;
}

static void
sq_set_filter(struct sq_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;

	/*
	 * Check for promiscuous mode.  Also implies
	 * all-multicast.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rxcmd |= RXCMD_REC_ALL;
		ifp->if_flags |= IFF_ALLMULTI;
		return;
	}

	/*
	 * The 8003 has no hash table.  If we have any multicast
	 * addresses on the list, enable reception of all multicast
	 * frames.
	 *
	 * XXX The 80c03 has a hash table.  We should use it.
	 */

	ETHER_FIRST_MULTI(step, ec, enm);

	if (enm == NULL) {
		sc->sc_rxcmd &= ~RXCMD_REC_MASK;
		sc->sc_rxcmd |= RXCMD_REC_BROAD;

		ifp->if_flags &= ~IFF_ALLMULTI;
		return;
	}

	sc->sc_rxcmd |= RXCMD_REC_MULTI;
	ifp->if_flags |= IFF_ALLMULTI;
}

int
sq_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	int s, error = 0;

	SQ_TRACE(SQ_IOCTL, (struct sq_softc *)ifp->if_softc, 0, 0);

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			error = sq_init(ifp);
		else
			error = 0;
	}

	splx(s);
	return error;
}

void
sq_start(struct ifnet *ifp)
{
	struct sq_softc *sc = ifp->if_softc;
	uint32_t status;
	struct mbuf *m0, *m;
	bus_dmamap_t dmamap;
	int err, totlen, nexttx, firsttx, lasttx = -1, ofree, seg;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_nfreetx;
	firsttx = sc->sc_nexttx;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_nfreetx != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = sc->sc_txmap[sc->sc_nexttx];

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 * Also copy it if we need to pad, so that we are sure there
		 * is room for the pad buffer.
		 * XXX the right way of doing this is to use a static buffer
		 * for padding and adding it to the transmit descriptor (see
		 * sys/dev/pci/if_tl.c for example). We can't do this here yet
		 * because we can't send packets with more than one fragment.
		 */
		if (m0->m_pkthdr.len < ETHER_PAD_LEN ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    device_xname(sc->sc_dev));
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n",
					    device_xname(sc->sc_dev));
					m_freem(m);
					break;
				}
			}

			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			if (m0->m_pkthdr.len < ETHER_PAD_LEN) {
				memset(mtod(m, char *) + m0->m_pkthdr.len, 0,
				    ETHER_PAD_LEN - m0->m_pkthdr.len);
				m->m_pkthdr.len = m->m_len = ETHER_PAD_LEN;
			} else
				m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;

			if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_NOWAIT)) != 0) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n",
				    device_xname(sc->sc_dev), err);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_nfreetx) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX it is worth it?
			 */
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0, BPF_D_OUT);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		SQ_TRACE(SQ_ENQUEUE, sc, sc->sc_nexttx, 0);

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_nexttx, seg = 0, totlen = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = SQ_NEXTTX(nexttx)) {
			if (sc->hpc_regs->revision == 3) {
				sc->sc_txdesc[nexttx].hpc3_hdd_bufptr =
				    dmamap->dm_segs[seg].ds_addr;
				sc->sc_txdesc[nexttx].hpc3_hdd_ctl =
				    dmamap->dm_segs[seg].ds_len;
			} else {
				sc->sc_txdesc[nexttx].hpc1_hdd_bufptr =
				    dmamap->dm_segs[seg].ds_addr;
				sc->sc_txdesc[nexttx].hpc1_hdd_ctl =
				    dmamap->dm_segs[seg].ds_len;
			}
			sc->sc_txdesc[nexttx].hdd_descptr =
			    SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx));
			lasttx = nexttx;
			totlen += dmamap->dm_segs[seg].ds_len;
		}

		/* Last descriptor gets end-of-packet */
		KASSERT(lasttx != -1);
		if (sc->hpc_regs->revision == 3)
			sc->sc_txdesc[lasttx].hpc3_hdd_ctl |=
			    HPC3_HDD_CTL_EOPACKET;
		else
			sc->sc_txdesc[lasttx].hpc1_hdd_ctl |=
			    HPC1_HDD_CTL_EOPACKET;

		SQ_DPRINTF(("%s: transmit %d-%d, len %d\n",
		    device_xname(sc->sc_dev), sc->sc_nexttx, lasttx, totlen));

		if (ifp->if_flags & IFF_DEBUG) {
			printf(" transmit chain:\n");
			for (seg = sc->sc_nexttx;; seg = SQ_NEXTTX(seg)) {
				printf(" descriptor %d:\n", seg);
				printf(" hdd_bufptr: 0x%08x\n",
				    (sc->hpc_regs->revision == 3) ?
				    sc->sc_txdesc[seg].hpc3_hdd_bufptr :
				    sc->sc_txdesc[seg].hpc1_hdd_bufptr);
				printf(" hdd_ctl: 0x%08x\n",
				    (sc->hpc_regs->revision == 3) ?
				    sc->sc_txdesc[seg].hpc3_hdd_ctl:
				    sc->sc_txdesc[seg].hpc1_hdd_ctl);
				printf(" hdd_descptr: 0x%08x\n",
				    sc->sc_txdesc[seg].hdd_descptr);

				if (seg == lasttx)
					break;
			}
		}

		/* Sync the descriptors we're using. */
		SQ_CDTXSYNC(sc, sc->sc_nexttx, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Store a pointer to the packet so we can free it later */
		sc->sc_txmbuf[sc->sc_nexttx] = m0;

		/* Advance the tx pointer. */
		sc->sc_nfreetx -= dmamap->dm_nsegs;
		sc->sc_nexttx = nexttx;
	}

	if (sc->sc_nfreetx != ofree) {
		SQ_DPRINTF(("%s: %d packets enqueued, first %d, INTR on %d\n",
		    device_xname(sc->sc_dev), lasttx - firsttx + 1,
		    firsttx, lasttx));

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued, mark it as the last
		 * descriptor.
		 *
		 * HPC1_HDD_CTL_INTR will generate an interrupt on
		 * HPC1. HPC3 requires HPC3_HDD_CTL_EOPACKET in
		 * addition to HPC3_HDD_CTL_INTR to interrupt.
		 */
		KASSERT(lasttx != -1);
		if (sc->hpc_regs->revision == 3) {
			sc->sc_txdesc[lasttx].hpc3_hdd_ctl |=
			    HPC3_HDD_CTL_INTR | HPC3_HDD_CTL_EOCHAIN;
		} else {
			sc->sc_txdesc[lasttx].hpc1_hdd_ctl |= HPC1_HDD_CTL_INTR;
			sc->sc_txdesc[lasttx].hpc1_hdd_bufptr |=
			    HPC1_HDD_CTL_EOCHAIN;
		}

		SQ_CDTXSYNC(sc, lasttx, 1,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * There is a potential race condition here if the HPC
		 * DMA channel is active and we try and either update
		 * the 'next descriptor' pointer in the HPC PIO space
		 * or the 'next descriptor' pointer in a previous desc-
		 * riptor.
		 *
		 * To avoid this, if the channel is active, we rely on
		 * the transmit interrupt routine noticing that there
		 * are more packets to send and restarting the HPC DMA
		 * engine, rather than mucking with the DMA state here.
		 */
		status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl);

		if ((status & sc->hpc_regs->enetx_ctl_active) != 0) {
			SQ_TRACE(SQ_ADD_TO_DMA, sc, firsttx, status);

			/*
			 * NB: hpc3_hdd_ctl == hpc1_hdd_bufptr, and
			 * HPC1_HDD_CTL_EOCHAIN == HPC3_HDD_CTL_EOCHAIN
			 */
			sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc3_hdd_ctl &=
			    ~HPC3_HDD_CTL_EOCHAIN;

			if (sc->hpc_regs->revision != 3)
				sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc1_hdd_ctl
				    &= ~HPC1_HDD_CTL_INTR;

			SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx), 1,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else if (sc->hpc_regs->revision == 3) {
			SQ_TRACE(SQ_START_DMA, sc, firsttx, status);

			sq_hpc_write(sc, HPC3_ENETX_NDBP, SQ_CDTXADDR(sc,
			    firsttx));

			/* Kick DMA channel into life */
			sq_hpc_write(sc, HPC3_ENETX_CTL, HPC3_ENETX_CTL_ACTIVE);
		} else {
			/*
			 * In the HPC1 case where transmit DMA is
			 * inactive, we can either kick off if
			 * the ring was previously empty, or call
			 * our transmit interrupt handler to
			 * figure out if the ring stopped short
			 * and restart at the right place.
			 */
			if (ofree == SQ_NTXDESC) {
				SQ_TRACE(SQ_START_DMA, sc, firsttx, status);

				sq_hpc_write(sc, HPC1_ENETX_NDBP,
				    SQ_CDTXADDR(sc, firsttx));
				sq_hpc_write(sc, HPC1_ENETX_CFXBP,
				    SQ_CDTXADDR(sc, firsttx));
				sq_hpc_write(sc, HPC1_ENETX_CBP,
				    SQ_CDTXADDR(sc, firsttx));

				/* Kick DMA channel into life */
				sq_hpc_write(sc, HPC1_ENETX_CTL,
				    HPC1_ENETX_CTL_ACTIVE);
			} else
				sq_txring_hpc1(sc);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

void
sq_stop(struct ifnet *ifp, int disable)
{
	int i;
	struct sq_softc *sc = ifp->if_softc;

	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = NULL;
		}
	}

	/* Clear Seeq transmit/receive command registers */
	sq_seeq_write(sc, SEEQ_TXCMD, 0);
	sq_seeq_write(sc, SEEQ_RXCMD, 0);

	sq_reset(sc);

	ifp->if_flags &= ~IFF_RUNNING;
	ifp->if_timer = 0;
}

/* Device timeout/watchdog routine. */
void
sq_watchdog(struct ifnet *ifp)
{
	uint32_t status;
	struct sq_softc *sc = ifp->if_softc;

	status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl);
	log(LOG_ERR, "%s: device timeout (prev %d, next %d, free %d, "
	    "status %08x)\n", device_xname(sc->sc_dev), sc->sc_prevtx,
	    sc->sc_nexttx, sc->sc_nfreetx, status);

	sq_trace_dump(sc);

	memset(&sc->sq_trace, 0, sizeof(sc->sq_trace));
	sc->sq_trace_idx = 0;

	if_statinc(ifp, if_oerrors);

	sq_init(ifp);
}

static void
sq_trace_dump(struct sq_softc *sc)
{
	int i;
	const char *act;

	for (i = 0; i < sc->sq_trace_idx; i++) {
		switch (sc->sq_trace[i].action) {
		case SQ_RESET:		act = "SQ_RESET";		break;
		case SQ_ADD_TO_DMA:	act = "SQ_ADD_TO_DMA";		break;
		case SQ_START_DMA:	act = "SQ_START_DMA";		break;
		case SQ_DONE_DMA:	act = "SQ_DONE_DMA";		break;
		case SQ_RESTART_DMA:	act = "SQ_RESTART_DMA";		break;
		case SQ_TXINTR_ENTER:	act = "SQ_TXINTR_ENTER";	break;
		case SQ_TXINTR_EXIT:	act = "SQ_TXINTR_EXIT";		break;
		case SQ_TXINTR_BUSY:	act = "SQ_TXINTR_BUSY";		break;
		case SQ_IOCTL:		act = "SQ_IOCTL";		break;
		case SQ_ENQUEUE:	act = "SQ_ENQUEUE";		break;
		default:		act = "UNKNOWN";
		}

		printf("%s: [%03d] action %-16s buf %03d free %03d "
		    "status %08x line %d\n", device_xname(sc->sc_dev), i, act,
		    sc->sq_trace[i].bufno, sc->sq_trace[i].freebuf,
		    sc->sq_trace[i].status, sc->sq_trace[i].line);
	}
}

static int
sq_intr(void *arg)
{
	struct sq_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int handled = 0;
	uint32_t stat;

	stat = sq_hpc_read(sc, sc->hpc_regs->enetr_reset);

	if ((stat & 2) == 0) {
		SQ_DPRINTF(("%s: Unexpected interrupt!\n",
		    device_xname(sc->sc_dev)));
	} else
		sq_hpc_write(sc, sc->hpc_regs->enetr_reset, (stat | 2));

	/*
	 * If the interface isn't running, the interrupt couldn't
	 * possibly have come from us.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	sc->sq_intrcnt.ev_count++;

	/* Always check for received packets */
	if (sq_rxintr(sc) != 0)
		handled++;

	/* Only handle transmit interrupts if we actually sent something */
	if (sc->sc_nfreetx < SQ_NTXDESC) {
		sq_txintr(sc);
		handled++;
	}

	if (handled)
		rnd_add_uint32(&sc->rnd_source, stat);
	return handled;
}

static int
sq_rxintr(struct sq_softc *sc)
{
	int count = 0;
	struct mbuf* m;
	int i, framelen;
	uint8_t pktstat;
	uint32_t status;
	uint32_t ctl_reg;
	int new_end, orig_end;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	for (i = sc->sc_nextrx;; i = SQ_NEXTRX(i)) {
		SQ_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/*
		 * If this is a CPU-owned buffer, we're at the end of the list.
		 */
		if (sc->hpc_regs->revision == 3)
			ctl_reg =
			    sc->sc_rxdesc[i].hpc3_hdd_ctl & HPC3_HDD_CTL_OWN;
		else
			ctl_reg =
			    sc->sc_rxdesc[i].hpc1_hdd_ctl & HPC1_HDD_CTL_OWN;

		if (ctl_reg) {
#if defined(SQ_DEBUG)
			uint32_t reg;

			reg = sq_hpc_read(sc, sc->hpc_regs->enetr_ctl);
			SQ_DPRINTF(("%s: rxintr: done at %d (ctl %08x)\n",
			    device_xname(sc->sc_dev), i, reg));
#endif
			break;
		}

		count++;

		m = sc->sc_rxmbuf[i];
		framelen = m->m_ext.ext_size - 3;
		if (sc->hpc_regs->revision == 3)
			framelen -=
			    HPC3_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc3_hdd_ctl);
		else
			framelen -=
			    HPC1_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc1_hdd_ctl);

		/* Now sync the actual packet data */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
		    sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);

		pktstat = *((uint8_t *)m->m_data + framelen + 2);

		if ((pktstat & RXSTAT_GOOD) == 0) {
			if_statinc(ifp, if_ierrors);

			if (pktstat & RXSTAT_OFLOW)
				printf("%s: receive FIFO overflow\n",
				    device_xname(sc->sc_dev));

			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			SQ_DPRINTF(("%s: sq_rxintr: buf %d no RXSTAT_GOOD\n",
			    device_xname(sc->sc_dev), i));
			continue;
		}

		if (sq_add_rxbuf(sc, i) != 0) {
			if_statinc(ifp, if_ierrors);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			SQ_DPRINTF(("%s: sq_rxintr: buf %d sq_add_rxbuf() "
			    "failed\n", device_xname(sc->sc_dev), i));
			continue;
		}


		m->m_data += 2;
		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = framelen;

		SQ_DPRINTF(("%s: sq_rxintr: buf %d len %d\n",
		    device_xname(sc->sc_dev), i, framelen));

		if_percpuq_enqueue(ifp->if_percpuq, m);
	}


	/* If anything happened, move ring start/end pointers to new spot */
	if (i != sc->sc_nextrx) {
		/*
		 * NB: hpc3_hdd_ctl == hpc1_hdd_bufptr, and
		 * HPC1_HDD_CTL_EOCHAIN == HPC3_HDD_CTL_EOCHAIN
		 */

		new_end = SQ_PREVRX(i);
		sc->sc_rxdesc[new_end].hpc3_hdd_ctl |= HPC3_HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, new_end,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		orig_end = SQ_PREVRX(sc->sc_nextrx);
		sc->sc_rxdesc[orig_end].hpc3_hdd_ctl &= ~HPC3_HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, orig_end,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		sc->sc_nextrx = i;
	}

	status = sq_hpc_read(sc, sc->hpc_regs->enetr_ctl);

	/* If receive channel is stopped, restart it... */
	if ((status & sc->hpc_regs->enetr_ctl_active) == 0) {
		/* Pass the start of the receive ring to the HPC */
		sq_hpc_write(sc, sc->hpc_regs->enetr_ndbp,
		    SQ_CDRXADDR(sc, sc->sc_nextrx));

		/* And turn on the HPC ethernet receive channel */
		sq_hpc_write(sc, sc->hpc_regs->enetr_ctl,
		    sc->hpc_regs->enetr_ctl_active);
	}

	return count;
}

static int
sq_txintr(struct sq_softc *sc)
{
	int shift = 0;
	uint32_t status, tmp;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (sc->hpc_regs->revision != 3)
		shift = 16;

	status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl) >> shift;

	SQ_TRACE(SQ_TXINTR_ENTER, sc, sc->sc_prevtx, status);

	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
	tmp = (sc->hpc_regs->enetx_ctl_active >> shift) | TXSTAT_GOOD;
	if ((status & tmp) == 0) {
		if (status & TXSTAT_COLL)
			if_statinc_ref(ifp, nsr, if_collisions);

		if (status & TXSTAT_UFLOW) {
			printf("%s: transmit underflow\n",
			    device_xname(sc->sc_dev));
			if_statinc_ref(ifp, nsr, if_oerrors);
		}

		if (status & TXSTAT_16COLL) {
			printf("%s: max collisions reached\n",
			    device_xname(sc->sc_dev));
			if_statinc_ref(ifp, nsr, if_oerrors);
			if_statadd_ref(ifp, nsr, if_collisions, 16);
		}
	}
	IF_STAT_PUTREF(ifp);

	/* prevtx now points to next xmit packet not yet finished */
	if (sc->hpc_regs->revision == 3)
		sq_txring_hpc3(sc);
	else
		sq_txring_hpc1(sc);

	/* If all packets have left the coop, cancel watchdog */
	if (sc->sc_nfreetx == SQ_NTXDESC)
		ifp->if_timer = 0;

	SQ_TRACE(SQ_TXINTR_EXIT, sc, sc->sc_prevtx, status);
	if_schedule_deferred_start(ifp);

	return 1;
}

/*
 * Reclaim used transmit descriptors and restart the transmit DMA
 * engine if necessary.
 */
static void
sq_txring_hpc1(struct sq_softc *sc)
{
	/*
	 * HPC1 doesn't tag transmitted descriptors, however,
	 * the NDBP register points to the next descriptor that
	 * has not yet been processed. If DMA is not in progress,
	 * we can safely reclaim all descriptors up to NDBP, and,
	 * if necessary, restart DMA at NDBP. Otherwise, if DMA
	 * is active, we can only safely reclaim up to CBP.
	 *
	 * For now, we'll only reclaim on inactive DMA and assume
	 * that a sufficiently large ring keeps us out of trouble.
	 */
	uint32_t reclaimto, status;
	int reclaimall, i = sc->sc_prevtx;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	status = sq_hpc_read(sc, HPC1_ENETX_CTL);
	if (status & HPC1_ENETX_CTL_ACTIVE) {
		SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status);
		return;
	} else
		reclaimto = sq_hpc_read(sc, HPC1_ENETX_NDBP);

	if (sc->sc_nfreetx == 0 && SQ_CDTXADDR(sc, i) == reclaimto)
		reclaimall = 1;
	else
		reclaimall = 0;

	while (sc->sc_nfreetx < SQ_NTXDESC) {
		if (SQ_CDTXADDR(sc, i) == reclaimto && !reclaimall)
			break;

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i],
		    0, sc->sc_txmap[i]->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		if_statinc(ifp, if_opackets);
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, sc, i, status);

		i = SQ_NEXTTX(i);
	}

	if (sc->sc_nfreetx < SQ_NTXDESC) {
		SQ_TRACE(SQ_RESTART_DMA, sc, i, status);

		KASSERT(reclaimto == SQ_CDTXADDR(sc, i));

		sq_hpc_write(sc, HPC1_ENETX_CFXBP, reclaimto);
		sq_hpc_write(sc, HPC1_ENETX_CBP, reclaimto);

		/* Kick DMA channel into life */
		sq_hpc_write(sc, HPC1_ENETX_CTL, HPC1_ENETX_CTL_ACTIVE);

		/*
		 * Set a watchdog timer in case the chip
		 * flakes out.
		 */
		ifp->if_timer = 5;
	}

	sc->sc_prevtx = i;
}

/*
 * Reclaim used transmit descriptors and restart the transmit DMA
 * engine if necessary.
 */
static void
sq_txring_hpc3(struct sq_softc *sc)
{
	/*
	 * HPC3 tags descriptors with a bit once they've been
	 * transmitted. We need only free each XMITDONE'd
	 * descriptor, and restart the DMA engine if any
	 * descriptors are left over.
	 */
	int i;
	uint32_t status = 0;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	i = sc->sc_prevtx;
	while (sc->sc_nfreetx < SQ_NTXDESC) {
		/*
		 * Check status first so we don't end up with a case of
		 * the buffer not being finished while the DMA channel
		 * has gone idle.
		 */
		status = sq_hpc_read(sc, HPC3_ENETX_CTL);

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* Check for used descriptor and restart DMA chain if needed */
		if ((sc->sc_txdesc[i].hpc3_hdd_ctl &
		    HPC3_HDD_CTL_XMITDONE) == 0) {
			if ((status & HPC3_ENETX_CTL_ACTIVE) == 0) {
				SQ_TRACE(SQ_RESTART_DMA, sc, i, status);

				sq_hpc_write(sc, HPC3_ENETX_NDBP,
				    SQ_CDTXADDR(sc, i));

				/* Kick DMA channel into life */
				sq_hpc_write(sc, HPC3_ENETX_CTL,
				    HPC3_ENETX_CTL_ACTIVE);

				/*
				 * Set a watchdog timer in case the chip
				 * flakes out.
				 */
				ifp->if_timer = 5;
			} else
				SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status);
			break;
		}

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i],
		    0, sc->sc_txmap[i]->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		if_statinc(ifp, if_opackets);
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, sc, i, status);
		i = SQ_NEXTTX(i);
	}

	sc->sc_prevtx = i;
}

void
sq_reset(struct sq_softc *sc)
{

	/* Stop HPC dma channels */
	sq_hpc_write(sc, sc->hpc_regs->enetr_ctl, 0);
	sq_hpc_write(sc, sc->hpc_regs->enetx_ctl, 0);

	sq_hpc_write(sc, sc->hpc_regs->enetr_reset, 3);
	delay(20);
	sq_hpc_write(sc, sc->hpc_regs->enetr_reset, 0);
}

/* sq_add_rxbuf: Add a receive buffer to the indicated descriptor. */
int
sq_add_rxbuf(struct sq_softc *sc, int idx)
{
	int err;
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (sc->sc_rxmbuf[idx] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[idx]);

	sc->sc_rxmbuf[idx] = m;

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap[idx],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    device_xname(sc->sc_dev), idx, err);
		panic("sq_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[idx],
	    0, sc->sc_rxmap[idx]->dm_mapsize, BUS_DMASYNC_PREREAD);

	SQ_INIT_RXDESC(sc, idx);

	return 0;
}

void
sq_dump_buffer(paddr_t addr, psize_t len)
{
	u_int i;
	uint8_t *physaddr = (uint8_t *)MIPS_PHYS_TO_KSEG1(addr);

	if (len == 0)
		return;

	printf("%p: ", physaddr);

	for (i = 0; i < len; i++) {
		printf("%02x ", *(physaddr + i) & 0xff);
		if ((i % 16) == 15 && i != len - 1)
			printf("\n%p: ", physaddr + i);
	}

	printf("\n");
}