72 73#include <dev/utopia/utopia.h> 74#include <dev/hatm/if_hatmconf.h> 75#include <dev/hatm/if_hatmreg.h> 76#include <dev/hatm/if_hatmvar.h> 77 78CTASSERT(sizeof(struct mbuf_page) == MBUF_ALLOC_SIZE); 79CTASSERT(sizeof(struct mbuf0_chunk) == MBUF0_CHUNK); 80CTASSERT(sizeof(struct mbuf1_chunk) == MBUF1_CHUNK); 81CTASSERT(sizeof(((struct mbuf0_chunk *)NULL)->storage) >= MBUF0_SIZE); 82CTASSERT(sizeof(((struct mbuf1_chunk *)NULL)->storage) >= MBUF1_SIZE); 83CTASSERT(sizeof(struct tpd) <= HE_TPD_SIZE); 84 85/* 86 * Either the queue treshold was crossed or a TPD with the INTR bit set 87 * was transmitted. 88 */ 89static void 90he_intr_tbrq(struct hatm_softc *sc, struct hetbrq *q, u_int group) 91{ 92 uint32_t *tailp = &sc->hsp->group[group].tbrq_tail; 93 u_int no; 94 95 while (q->head != (*tailp >> 2)) { 96 no = (q->tbrq[q->head].addr & HE_REGM_TBRQ_ADDR) >> 97 HE_REGS_TPD_ADDR; 98 hatm_tx_complete(sc, TPD_ADDR(sc, no), 99 (q->tbrq[q->head].addr & HE_REGM_TBRQ_FLAGS)); 100 101 if (++q->head == q->size) 102 q->head = 0; 103 } 104 WRITE4(sc, HE_REGO_TBRQ_H(group), q->head << 2); 105} 106 107/* 108 * DMA loader function for external mbuf page. 109 */ 110static void 111hatm_extbuf_helper(void *arg, bus_dma_segment_t *segs, int nsegs, 112 int error) 113{ 114 if (error) { 115 printf("%s: mapping error %d\n", __func__, error); 116 return; 117 } 118 KASSERT(nsegs == 1, 119 ("too many segments for DMA: %d", nsegs)); 120 KASSERT(segs[0].ds_addr <= 0xffffffffLU, 121 ("phys addr too large %lx", (u_long)segs[0].ds_addr)); 122 123 *(uint32_t *)arg = segs[0].ds_addr; 124} 125 126/* 127 * Allocate a page of external mbuf storage for the small pools. 128 * Create a DMA map and load it. Put all the chunks onto the right 129 * free list. 130 */ 131static void 132hatm_mbuf_page_alloc(struct hatm_softc *sc, u_int group) 133{ 134 struct mbuf_page *pg; 135 int err; 136 u_int i; 137 138 if (sc->mbuf_npages == HE_CONFIG_MAX_MBUF_PAGES) 139 return; 140 if ((pg = malloc(MBUF_ALLOC_SIZE, M_DEVBUF, M_NOWAIT)) == NULL) 141 return; 142 bzero(pg->hdr.card, sizeof(pg->hdr.card)); 143 bzero(pg->hdr.used, sizeof(pg->hdr.used)); 144 145 err = bus_dmamap_create(sc->mbuf_tag, 0, &pg->hdr.map); 146 if (err != 0) { 147 if_printf(&sc->ifatm.ifnet, "%s -- bus_dmamap_create: %d\n", 148 __func__, err); 149 free(pg, M_DEVBUF); 150 return; 151 } 152 err = bus_dmamap_load(sc->mbuf_tag, pg->hdr.map, pg, MBUF_ALLOC_SIZE, 153 hatm_extbuf_helper, &pg->hdr.phys, BUS_DMA_NOWAIT); 154 if (err != 0) { 155 if_printf(&sc->ifatm.ifnet, "%s -- mbuf mapping failed %d\n", 156 __func__, err); 157 bus_dmamap_destroy(sc->mbuf_tag, pg->hdr.map); 158 free(pg, M_DEVBUF); 159 return; 160 } 161 162 sc->mbuf_pages[sc->mbuf_npages] = pg; 163 164 if (group == 0) { 165 struct mbuf0_chunk *c; 166 167 pg->hdr.nchunks = MBUF0_PER_PAGE; 168 pg->hdr.chunksize = MBUF0_CHUNK; 169 pg->hdr.hdroff = sizeof(c->storage); 170 c = (struct mbuf0_chunk *)pg; 171 for (i = 0; i < MBUF0_PER_PAGE; i++, c++) { 172 c->hdr.pageno = sc->mbuf_npages; 173 c->hdr.chunkno = i; 174 SLIST_INSERT_HEAD(&sc->mbuf0_list, 175 (struct mbufx_free *)c, link); 176 } 177 } else { 178 struct mbuf1_chunk *c; 179 180 pg->hdr.nchunks = MBUF1_PER_PAGE; 181 pg->hdr.chunksize = MBUF1_CHUNK; 182 pg->hdr.hdroff = sizeof(c->storage); 183 c = (struct mbuf1_chunk *)pg; 184 for (i = 0; i < MBUF1_PER_PAGE; i++, c++) { 185 c->hdr.pageno = sc->mbuf_npages; 186 c->hdr.chunkno = i; 187 SLIST_INSERT_HEAD(&sc->mbuf1_list, 188 (struct mbufx_free *)c, link); 189 } 190 } 191 sc->mbuf_npages++; 192} 193 194/* 195 * Free an mbuf and put it onto the free list. 196 */ 197static void 198hatm_mbuf0_free(void *buf, void *args) 199{ 200 struct hatm_softc *sc = args; 201 struct mbuf0_chunk *c = buf; 202 203 mtx_lock(&sc->mbuf0_mtx); 204 SLIST_INSERT_HEAD(&sc->mbuf0_list, (struct mbufx_free *)c, link); 205 MBUF_CLR_BIT(sc->mbuf_pages[c->hdr.pageno]->hdr.used, c->hdr.chunkno); 206 mtx_unlock(&sc->mbuf0_mtx); 207} 208static void 209hatm_mbuf1_free(void *buf, void *args) 210{ 211 struct hatm_softc *sc = args; 212 struct mbuf1_chunk *c = buf; 213 214 mtx_lock(&sc->mbuf1_mtx); 215 SLIST_INSERT_HEAD(&sc->mbuf1_list, (struct mbufx_free *)c, link); 216 MBUF_CLR_BIT(sc->mbuf_pages[c->hdr.pageno]->hdr.used, c->hdr.chunkno); 217 mtx_unlock(&sc->mbuf1_mtx); 218} 219 220/* 221 * Allocate an external mbuf storage 222 */ 223static int 224hatm_mbuf_alloc(struct hatm_softc *sc, u_int group, struct mbuf *m, 225 uint32_t *phys, uint32_t *handle) 226{ 227 struct mbufx_free *cf; 228 struct mbuf_page *pg; 229 230 if (group == 0) { 231 struct mbuf0_chunk *buf0; 232 233 mtx_lock(&sc->mbuf0_mtx); 234 if ((cf = SLIST_FIRST(&sc->mbuf0_list)) == NULL) { 235 hatm_mbuf_page_alloc(sc, group); 236 if ((cf = SLIST_FIRST(&sc->mbuf0_list)) == NULL) { 237 mtx_unlock(&sc->mbuf0_mtx); 238 return (0); 239 } 240 } 241 SLIST_REMOVE_HEAD(&sc->mbuf0_list, link); 242 buf0 = (struct mbuf0_chunk *)cf; 243 pg = sc->mbuf_pages[buf0->hdr.pageno]; 244 MBUF_SET_BIT(pg->hdr.card, buf0->hdr.chunkno); 245 mtx_unlock(&sc->mbuf0_mtx); 246 247 m_extadd(m, (caddr_t)buf0, MBUF0_SIZE, hatm_mbuf0_free, sc, 248 M_PKTHDR, EXT_NET_DRV); 249 m->m_data += MBUF0_OFFSET; 250 buf0->hdr.mbuf = m; 251 252 *handle = MBUF_MAKE_HANDLE(buf0->hdr.pageno, buf0->hdr.chunkno); 253 254 } else if (group == 1) { 255 struct mbuf1_chunk *buf1; 256 257 mtx_lock(&sc->mbuf1_mtx); 258 if ((cf = SLIST_FIRST(&sc->mbuf1_list)) == NULL) { 259 hatm_mbuf_page_alloc(sc, group); 260 if ((cf = SLIST_FIRST(&sc->mbuf1_list)) == NULL) { 261 mtx_unlock(&sc->mbuf1_mtx); 262 return (0); 263 } 264 } 265 SLIST_REMOVE_HEAD(&sc->mbuf1_list, link); 266 buf1 = (struct mbuf1_chunk *)cf; 267 pg = sc->mbuf_pages[buf1->hdr.pageno]; 268 MBUF_SET_BIT(pg->hdr.card, buf1->hdr.chunkno); 269 mtx_unlock(&sc->mbuf1_mtx); 270 271 m_extadd(m, (caddr_t)buf1, MBUF1_SIZE, hatm_mbuf1_free, sc, 272 M_PKTHDR, EXT_NET_DRV); 273 m->m_data += MBUF1_OFFSET; 274 buf1->hdr.mbuf = m; 275 276 *handle = MBUF_MAKE_HANDLE(buf1->hdr.pageno, buf1->hdr.chunkno); 277 278 } else 279 return (-1); 280 281 *phys = pg->hdr.phys + (mtod(m, char *) - (char *)pg); 282 bus_dmamap_sync(sc->mbuf_tag, pg->hdr.map, BUS_DMASYNC_PREREAD); 283 284 return (0); 285} 286 287static void 288hatm_mbuf_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 289{ 290 uint32_t *ptr = (uint32_t *)arg; 291 292 if (nsegs == 0) { 293 printf("%s: error=%d\n", __func__, error); 294 return; 295 } 296 KASSERT(nsegs == 1, ("too many segments for mbuf: %d", nsegs)); 297 KASSERT(segs[0].ds_addr <= 0xffffffffLU, 298 ("phys addr too large %lx", (u_long)segs[0].ds_addr)); 299 300 *ptr = segs[0].ds_addr; 301} 302 303/* 304 * Receive buffer pool interrupt. This means the number of entries in the 305 * queue has dropped below the threshold. Try to supply new buffers. 306 */ 307static void 308he_intr_rbp(struct hatm_softc *sc, struct herbp *rbp, u_int large, 309 u_int group) 310{ 311 u_int ntail, upd; 312 struct mbuf *m; 313 int error; 314 315 DBG(sc, INTR, ("%s buffer supply threshold crossed for group %u", 316 large ? "large" : "small", group)); 317 318 rbp->head = (READ4(sc, HE_REGO_RBP_S(large, group)) >> HE_REGS_RBP_HEAD) 319 & (rbp->size - 1); 320 321 upd = 0; 322 for (;;) { 323 if ((ntail = rbp->tail + 1) == rbp->size) 324 ntail = 0; 325 if (ntail == rbp->head) 326 break; 327 328 /* allocate the MBUF */ 329 if (large) { 330 if ((m = m_getcl(M_DONTWAIT, MT_DATA, 331 M_PKTHDR)) == NULL) { 332 if_printf(&sc->ifatm.ifnet, 333 "no mbuf clusters\n"); 334 break; 335 } 336 m->m_data += MBUFL_OFFSET; 337 338 if (sc->lbufs[sc->lbufs_next] != NULL) 339 panic("hatm: lbufs full %u", sc->lbufs_next); 340 sc->lbufs[sc->lbufs_next] = m; 341 342 if ((error = bus_dmamap_load(sc->mbuf_tag, 343 sc->rmaps[sc->lbufs_next], 344 m->m_data, rbp->bsize, hatm_mbuf_helper, 345 &rbp->rbp[rbp->tail].phys, BUS_DMA_NOWAIT)) != NULL) 346 panic("hatm: mbuf mapping failed %d", error); 347 348 bus_dmamap_sync(sc->mbuf_tag, 349 sc->rmaps[sc->lbufs_next], 350 BUS_DMASYNC_PREREAD); 351 352 rbp->rbp[rbp->tail].handle = sc->lbufs_next | 353 MBUF_LARGE_FLAG; 354 355 if (++sc->lbufs_next == sc->lbufs_size) 356 sc->lbufs_next = 0; 357 358 } else { 359 MGETHDR(m, M_DONTWAIT, MT_DATA); 360 if (m == NULL) { 361 if_printf(&sc->ifatm.ifnet, "no mbufs\n"); 362 break; 363 } 364 if (hatm_mbuf_alloc(sc, group, m, 365 &rbp->rbp[rbp->tail].phys, 366 &rbp->rbp[rbp->tail].handle)) { 367 m_freem(m); 368 break; 369 } 370 } 371 DBG(sc, DMA, ("MBUF loaded: handle=%x m=%p phys=%x", 372 rbp->rbp[rbp->tail].handle, m, rbp->rbp[rbp->tail].phys)); 373 rbp->rbp[rbp->tail].handle <<= HE_REGS_RBRQ_ADDR; 374 375 rbp->tail = ntail; 376 upd++; 377 } 378 if (upd) { 379 WRITE4(sc, HE_REGO_RBP_T(large, group), 380 (rbp->tail << HE_REGS_RBP_TAIL)); 381 } 382} 383 384/* 385 * Extract the buffer and hand it to the receive routine 386 */ 387static struct mbuf * 388hatm_rx_buffer(struct hatm_softc *sc, u_int group, u_int handle) 389{ 390 u_int pageno; 391 u_int chunkno; 392 struct mbuf *m; 393 394 if (handle & MBUF_LARGE_FLAG) { 395 /* large buffer - sync and unload */ 396 handle &= ~MBUF_LARGE_FLAG; 397 DBG(sc, RX, ("RX large handle=%x", handle)); 398 399 bus_dmamap_sync(sc->mbuf_tag, sc->rmaps[handle], 400 BUS_DMASYNC_POSTREAD); 401 bus_dmamap_unload(sc->mbuf_tag, sc->rmaps[handle]); 402 403 m = sc->lbufs[handle]; 404 sc->lbufs[handle] = NULL; 405 406 return (m); 407 } 408 409 MBUF_PARSE_HANDLE(handle, pageno, chunkno); 410 411 DBG(sc, RX, ("RX group=%u handle=%x page=%u chunk=%u", group, handle, 412 pageno, chunkno)); 413 414 if (group == 0) { 415 struct mbuf0_chunk *c0; 416 417 c0 = (struct mbuf0_chunk *)sc->mbuf_pages[pageno] + chunkno; 418 KASSERT(c0->hdr.pageno == pageno, ("pageno = %u/%u", 419 c0->hdr.pageno, pageno)); 420 KASSERT(c0->hdr.chunkno == chunkno, ("chunkno = %u/%u", 421 c0->hdr.chunkno, chunkno)); 422 423 m = c0->hdr.mbuf; 424 425 } else { 426 struct mbuf1_chunk *c1; 427 428 c1 = (struct mbuf1_chunk *)sc->mbuf_pages[pageno] + chunkno; 429 KASSERT(c1->hdr.pageno == pageno, ("pageno = %u/%u", 430 c1->hdr.pageno, pageno)); 431 KASSERT(c1->hdr.chunkno == chunkno, ("chunkno = %u/%u", 432 c1->hdr.chunkno, chunkno)); 433 434 m = c1->hdr.mbuf; 435 } 436 MBUF_CLR_BIT(sc->mbuf_pages[pageno]->hdr.card, chunkno); 437 MBUF_SET_BIT(sc->mbuf_pages[pageno]->hdr.used, chunkno); 438 439 bus_dmamap_sync(sc->mbuf_tag, sc->mbuf_pages[pageno]->hdr.map, 440 BUS_DMASYNC_POSTREAD); 441 442 return (m); 443} 444 445/* 446 * Interrupt because of receive buffer returned. 447 */ 448static void 449he_intr_rbrq(struct hatm_softc *sc, struct herbrq *rq, u_int group) 450{ 451 struct he_rbrqen *e; 452 uint32_t flags, tail; 453 u_int cid, len; 454 struct mbuf *m; 455 456 for (;;) { 457 tail = sc->hsp->group[group].rbrq_tail >> 3; 458 459 if (rq->head == tail) 460 break; 461 462 e = &rq->rbrq[rq->head]; 463 464 flags = e->addr & HE_REGM_RBRQ_FLAGS; 465 if (!(flags & HE_REGM_RBRQ_HBUF_ERROR)) 466 m = hatm_rx_buffer(sc, group, 467 (e->addr & HE_REGM_RBRQ_ADDR) >> HE_REGS_RBRQ_ADDR); 468 else 469 m = NULL; 470 471 cid = (e->len & HE_REGM_RBRQ_CID) >> HE_REGS_RBRQ_CID; 472 len = 4 * (e->len & HE_REGM_RBRQ_LEN); 473 474 hatm_rx(sc, cid, flags, m, len); 475 476 if (++rq->head == rq->size) 477 rq->head = 0; 478 } 479 WRITE4(sc, HE_REGO_RBRQ_H(group), rq->head << 3); 480} 481 482void 483hatm_intr(void *p) 484{ 485 struct heirq *q = p; 486 struct hatm_softc *sc = q->sc; 487 u_int status; 488 u_int tail; 489 490 /* if we have a stray interrupt with a non-initialized card, 491 * we cannot even lock before looking at the flag */ 492 if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING)) 493 return; 494 495 mtx_lock(&sc->mtx); 496 (void)READ4(sc, HE_REGO_INT_FIFO); 497 498 tail = *q->tailp; 499 if (q->head == tail) { 500 /* workaround for tail pointer not updated bug (8.1.1) */ 501 DBG(sc, INTR, ("hatm: intr tailq not updated bug triggered")); 502 503 /* read the tail pointer from the card */ 504 tail = READ4(sc, HE_REGO_IRQ_BASE(q->group)) & 505 HE_REGM_IRQ_BASE_TAIL; 506 BARRIER_R(sc); 507 508 sc->istats.bug_no_irq_upd++; 509 } 510 511 /* clear the interrupt */ 512 WRITE4(sc, HE_REGO_INT_FIFO, HE_REGM_INT_FIFO_CLRA); 513 BARRIER_W(sc); 514 515 while (q->head != tail) { 516 status = q->irq[q->head]; 517 q->irq[q->head] = HE_REGM_ITYPE_INVALID; 518 if (++q->head == (q->size - 1)) 519 q->head = 0; 520 521 switch (status & HE_REGM_ITYPE) { 522 523 case HE_REGM_ITYPE_TBRQ: 524 DBG(sc, INTR, ("TBRQ treshold %u", status & HE_REGM_IGROUP)); 525 sc->istats.itype_tbrq++; 526 he_intr_tbrq(sc, &sc->tbrq, status & HE_REGM_IGROUP); 527 break; 528 529 case HE_REGM_ITYPE_TPD: 530 DBG(sc, INTR, ("TPD ready %u", status & HE_REGM_IGROUP)); 531 sc->istats.itype_tpd++; 532 he_intr_tbrq(sc, &sc->tbrq, status & HE_REGM_IGROUP); 533 break; 534 535 case HE_REGM_ITYPE_RBPS: 536 sc->istats.itype_rbps++; 537 switch (status & HE_REGM_IGROUP) { 538 539 case 0: 540 he_intr_rbp(sc, &sc->rbp_s0, 0, 0); 541 break; 542 543 case 1: 544 he_intr_rbp(sc, &sc->rbp_s1, 0, 1); 545 break; 546 547 default: 548 if_printf(&sc->ifatm.ifnet, "bad INTR RBPS%u\n", 549 status & HE_REGM_IGROUP); 550 break; 551 } 552 break; 553 554 case HE_REGM_ITYPE_RBPL: 555 sc->istats.itype_rbpl++; 556 switch (status & HE_REGM_IGROUP) { 557 558 case 0: 559 he_intr_rbp(sc, &sc->rbp_l0, 1, 0); 560 break; 561 562 default: 563 if_printf(&sc->ifatm.ifnet, "bad INTR RBPL%u\n", 564 status & HE_REGM_IGROUP); 565 break; 566 } 567 break; 568 569 case HE_REGM_ITYPE_RBRQ: 570 DBG(sc, INTR, ("INTERRUPT RBRQ %u", status & HE_REGM_IGROUP)); 571 sc->istats.itype_rbrq++; 572 switch (status & HE_REGM_IGROUP) { 573 574 case 0: 575 he_intr_rbrq(sc, &sc->rbrq_0, 0); 576 break; 577 578 case 1: 579 if (sc->rbrq_1.size > 0) { 580 he_intr_rbrq(sc, &sc->rbrq_1, 1); 581 break; 582 } 583 /* FALLTHRU */ 584 585 default: 586 if_printf(&sc->ifatm.ifnet, "bad INTR RBRQ%u\n", 587 status & HE_REGM_IGROUP); 588 break; 589 } 590 break; 591 592 case HE_REGM_ITYPE_RBRQT: 593 DBG(sc, INTR, ("INTERRUPT RBRQT %u", status & HE_REGM_IGROUP)); 594 sc->istats.itype_rbrqt++; 595 switch (status & HE_REGM_IGROUP) { 596 597 case 0: 598 he_intr_rbrq(sc, &sc->rbrq_0, 0); 599 break; 600 601 case 1: 602 if (sc->rbrq_1.size > 0) { 603 he_intr_rbrq(sc, &sc->rbrq_1, 1); 604 break; 605 } 606 /* FALLTHRU */ 607 608 default: 609 if_printf(&sc->ifatm.ifnet, "bad INTR RBRQT%u\n", 610 status & HE_REGM_IGROUP); 611 break; 612 } 613 break; 614 615 case HE_REGM_ITYPE_PHYS: 616 sc->istats.itype_phys++; 617 utopia_intr(&sc->utopia); 618 break; 619 620#if HE_REGM_ITYPE_UNKNOWN != HE_REGM_ITYPE_INVALID 621 case HE_REGM_ITYPE_UNKNOWN: 622 sc->istats.itype_unknown++; 623 if_printf(&sc->ifatm.ifnet, "bad interrupt\n"); 624 break; 625#endif 626 627 case HE_REGM_ITYPE_ERR: 628 sc->istats.itype_err++; 629 switch (status) { 630 631 case HE_REGM_ITYPE_PERR: 632 if_printf(&sc->ifatm.ifnet, "parity error\n"); 633 break; 634 635 case HE_REGM_ITYPE_ABORT: 636 if_printf(&sc->ifatm.ifnet, "abort interrupt " 637 "addr=0x%08x\n", 638 READ4(sc, HE_REGO_ABORT_ADDR)); 639 break; 640 641 default: 642 if_printf(&sc->ifatm.ifnet, 643 "bad interrupt type %08x\n", status); 644 break; 645 } 646 break; 647 648 case HE_REGM_ITYPE_INVALID: 649 /* this is the documented fix for the ISW bug 8.1.1 650 * Note, that the documented fix is partly wrong: 651 * the ISWs should be intialized to 0xf8 not 0xff */ 652 sc->istats.bug_bad_isw++; 653 DBG(sc, INTR, ("hatm: invalid ISW bug triggered")); 654 he_intr_tbrq(sc, &sc->tbrq, 0); 655 he_intr_rbp(sc, &sc->rbp_s0, 0, 0); 656 he_intr_rbp(sc, &sc->rbp_l0, 1, 0); 657 he_intr_rbp(sc, &sc->rbp_s1, 0, 1); 658 he_intr_rbrq(sc, &sc->rbrq_0, 0); 659 he_intr_rbrq(sc, &sc->rbrq_1, 1); 660 utopia_intr(&sc->utopia); 661 break; 662 663 default: 664 if_printf(&sc->ifatm.ifnet, "bad interrupt type %08x\n", 665 status); 666 break; 667 } 668 } 669 670 /* write back head to clear queue */ 671 WRITE4(sc, HE_REGO_IRQ_HEAD(0), 672 ((q->size - 1) << HE_REGS_IRQ_HEAD_SIZE) | 673 (q->thresh << HE_REGS_IRQ_HEAD_THRESH) | 674 (q->head << HE_REGS_IRQ_HEAD_HEAD)); 675 BARRIER_W(sc); 676 677 /* workaround the back-to-back irq access problem (8.1.2) */ 678 (void)READ4(sc, HE_REGO_INT_FIFO); 679 BARRIER_R(sc); 680 681 mtx_unlock(&sc->mtx); 682}
|