if_hatm_intr.c revision 121677
1/* 2 * Copyright (c) 2001-2003 3 * Fraunhofer Institute for Open Communication Systems (FhG Fokus). 4 * All rights reserved. 5 * Author: Hartmut Brandt <harti@freebsd.org> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29#include <sys/cdefs.h> 30__FBSDID("$FreeBSD: head/sys/dev/hatm/if_hatm_intr.c 121677 2003-10-29 13:21:38Z harti $"); 31 32/* 33 * ForeHE driver. 34 * 35 * Interrupt handler. 
36 */ 37 38#include "opt_inet.h" 39#include "opt_natm.h" 40 41#include <sys/types.h> 42#include <sys/param.h> 43#include <sys/systm.h> 44#include <sys/malloc.h> 45#include <sys/kernel.h> 46#include <sys/bus.h> 47#include <sys/errno.h> 48#include <sys/conf.h> 49#include <sys/module.h> 50#include <sys/queue.h> 51#include <sys/syslog.h> 52#include <sys/condvar.h> 53#include <sys/sysctl.h> 54#include <vm/uma.h> 55 56#include <sys/sockio.h> 57#include <sys/mbuf.h> 58#include <sys/socket.h> 59 60#include <net/if.h> 61#include <net/if_media.h> 62#include <net/if_atm.h> 63#include <net/route.h> 64#include <netinet/in.h> 65#include <netinet/if_atm.h> 66 67#include <machine/bus.h> 68#include <machine/resource.h> 69#include <sys/bus.h> 70#include <sys/rman.h> 71#include <dev/pci/pcireg.h> 72#include <dev/pci/pcivar.h> 73 74#include <dev/utopia/utopia.h> 75#include <dev/hatm/if_hatmconf.h> 76#include <dev/hatm/if_hatmreg.h> 77#include <dev/hatm/if_hatmvar.h> 78 79CTASSERT(sizeof(struct mbuf_page) == MBUF_ALLOC_SIZE); 80CTASSERT(sizeof(struct mbuf0_chunk) == MBUF0_CHUNK); 81CTASSERT(sizeof(struct mbuf1_chunk) == MBUF1_CHUNK); 82CTASSERT(sizeof(((struct mbuf0_chunk *)NULL)->storage) >= MBUF0_SIZE); 83CTASSERT(sizeof(((struct mbuf1_chunk *)NULL)->storage) >= MBUF1_SIZE); 84CTASSERT(sizeof(struct tpd) <= HE_TPD_SIZE); 85 86static void hatm_mbuf_page_alloc(struct hatm_softc *sc, u_int group); 87 88/* 89 * Free an external mbuf to a list. We use atomic functions so that 90 * we don't need a mutex for the list. 
 */
static __inline void
hatm_ext_free(struct mbufx_free **list, struct mbufx_free *buf)
{
	for (;;) {
		/*
		 * Link the buffer in front of the current list head and
		 * try to swing the head pointer atomically; retry if we
		 * lost the race against a concurrent free/alloc.
		 */
		buf->link = *list;
		if (atomic_cmpset_ptr(list, buf->link, buf))
			break;
	}
}

/*
 * Pop one chunk from the lock-free free list of group 'g'.  If the list
 * is empty, try to replenish it with a freshly allocated mbuf page and
 * retry once.  Returns NULL if no chunk could be obtained.
 */
static __inline struct mbufx_free *
hatm_ext_alloc(struct hatm_softc *sc, u_int g)
{
	struct mbufx_free *buf;

	for (;;) {
		if ((buf = sc->mbuf_list[g]) == NULL)
			break;
		if (atomic_cmpset_ptr(&sc->mbuf_list[g], buf, buf->link))
			break;
	}
	if (buf == NULL) {
		/* list was empty - allocate a new page of chunks */
		hatm_mbuf_page_alloc(sc, g);
		for (;;) {
			if ((buf = sc->mbuf_list[g]) == NULL)
				break;
			if (atomic_cmpset_ptr(&sc->mbuf_list[g], buf, buf->link))
				break;
		}
	}
	return (buf);
}

/*
 * Either the queue threshold was crossed or a TPD with the INTR bit set
 * was transmitted.  Drain the transmit buffer return queue and hand each
 * returned TPD to hatm_tx_complete().
 */
static void
he_intr_tbrq(struct hatm_softc *sc, struct hetbrq *q, u_int group)
{
	/* tail pointer is maintained by the card in host status memory */
	uint32_t *tailp = &sc->hsp->group[group].tbrq_tail;
	u_int no;

	while (q->head != (*tailp >> 2)) {
		no = (q->tbrq[q->head].addr & HE_REGM_TBRQ_ADDR) >>
		    HE_REGS_TPD_ADDR;
		hatm_tx_complete(sc, TPD_ADDR(sc, no),
		    (q->tbrq[q->head].addr & HE_REGM_TBRQ_FLAGS));

		if (++q->head == q->size)
			q->head = 0;
	}
	/* tell the card how far we have come */
	WRITE4(sc, HE_REGO_TBRQ_H(group), q->head << 2);
}

/*
 * DMA loader function for external mbuf page.  Stores the single
 * 32-bit bus address into the uint32_t pointed to by 'arg'.
 */
static void
hatm_extbuf_helper(void *arg, bus_dma_segment_t *segs, int nsegs,
    int error)
{
	if (error) {
		printf("%s: mapping error %d\n", __func__, error);
		return;
	}
	KASSERT(nsegs == 1,
	    ("too many segments for DMA: %d", nsegs));
	KASSERT(segs[0].ds_addr <= 0xffffffffLU,
	    ("phys addr too large %lx", (u_long)segs[0].ds_addr));

	*(uint32_t *)arg = segs[0].ds_addr;
}

/*
 * Allocate a page of external mbuf storage for the small pools.
 * Create a DMA map and load it. Put all the chunks onto the right
 * free list.
 */
static void
hatm_mbuf_page_alloc(struct hatm_softc *sc, u_int group)
{
	struct mbuf_page *pg;
	int err;
	u_int i;

	/* hard limit on the number of external mbuf pages */
	if (sc->mbuf_npages == HE_CONFIG_MAX_MBUF_PAGES)
		return;
	if ((pg = malloc(MBUF_ALLOC_SIZE, M_DEVBUF, M_NOWAIT)) == NULL)
		return;
	/* clear the 'owned by card' bitmap for all chunks of this page */
	bzero(pg->hdr.card, sizeof(pg->hdr.card));

	err = bus_dmamap_create(sc->mbuf_tag, 0, &pg->hdr.map);
	if (err != 0) {
		if_printf(&sc->ifatm.ifnet, "%s -- bus_dmamap_create: %d\n",
		    __func__, err);
		free(pg, M_DEVBUF);
		return;
	}
	/* load the whole page; the helper fills in pg->hdr.phys */
	err = bus_dmamap_load(sc->mbuf_tag, pg->hdr.map, pg, MBUF_ALLOC_SIZE,
	    hatm_extbuf_helper, &pg->hdr.phys, BUS_DMA_NOWAIT);
	if (err != 0) {
		if_printf(&sc->ifatm.ifnet, "%s -- mbuf mapping failed %d\n",
		    __func__, err);
		bus_dmamap_destroy(sc->mbuf_tag, pg->hdr.map);
		free(pg, M_DEVBUF);
		return;
	}

	sc->mbuf_pages[sc->mbuf_npages] = pg;

	if (group == 0) {
		struct mbuf0_chunk *c;

		pg->hdr.nchunks = MBUF0_PER_PAGE;
		pg->hdr.chunksize = MBUF0_CHUNK;
		pg->hdr.hdroff = sizeof(c->storage);
		c = (struct mbuf0_chunk *)pg;
		/* carve the page into chunks and put them on the free list */
		for (i = 0; i < MBUF0_PER_PAGE; i++, c++) {
			c->hdr.pageno = sc->mbuf_npages;
			c->hdr.chunkno = i;
			hatm_ext_free(&sc->mbuf_list[0],
			    (struct mbufx_free *)c);
		}
	} else {
		struct mbuf1_chunk *c;

		pg->hdr.nchunks = MBUF1_PER_PAGE;
		pg->hdr.chunksize = MBUF1_CHUNK;
		pg->hdr.hdroff = sizeof(c->storage);
		c = (struct mbuf1_chunk *)pg;
		for (i = 0; i < MBUF1_PER_PAGE; i++, c++) {
			c->hdr.pageno = sc->mbuf_npages;
			c->hdr.chunkno = i;
			hatm_ext_free(&sc->mbuf_list[1],
			    (struct mbufx_free *)c);
		}
	}
	/* commit only after the page is fully initialized */
	sc->mbuf_npages++;
}

/*
 * Free an mbuf and put it onto the free list.
 */
static void
hatm_mbuf0_free(void *buf, void *args)
{
	struct hatm_softc *sc = args;
	struct mbuf0_chunk *c = buf;

	hatm_ext_free(&sc->mbuf_list[0], (struct mbufx_free *)c);
}
/* Same, for the group-1 (larger small-buffer) pool. */
static void
hatm_mbuf1_free(void *buf, void *args)
{
	struct hatm_softc *sc = args;
	struct mbuf1_chunk *c = buf;

	hatm_ext_free(&sc->mbuf_list[1], (struct mbufx_free *)c);
}

/*
 * Allocate an external mbuf storage chunk for the given group (0 or 1).
 * On success fills in the bus address (*phys) and the page/chunk handle
 * (*handle) for the card and returns 0; returns -1 on failure or for an
 * unknown group.
 */
static int
hatm_mbuf_alloc(struct hatm_softc *sc, u_int group, uint32_t *phys,
    uint32_t *handle)
{
	struct mbufx_free *cf;
	struct mbuf_page *pg;

	if (group == 0) {
		struct mbuf0_chunk *buf0;

		if ((cf = hatm_ext_alloc(sc, 0)) == NULL)
			return (-1);
		buf0 = (struct mbuf0_chunk *)cf;
		pg = sc->mbuf_pages[buf0->hdr.pageno];
		/* mark this chunk as owned by the card */
		MBUF_SET_BIT(pg->hdr.card, buf0->hdr.chunkno);

		*handle = MBUF_MAKE_HANDLE(buf0->hdr.pageno, buf0->hdr.chunkno);
		*phys = pg->hdr.phys + buf0->hdr.chunkno * MBUF0_CHUNK +
		    MBUF0_OFFSET;

	} else if (group == 1) {
		struct mbuf1_chunk *buf1;

		if ((cf = hatm_ext_alloc(sc, 1)) == NULL)
			return (-1);
		buf1 = (struct mbuf1_chunk *)cf;
		pg = sc->mbuf_pages[buf1->hdr.pageno];
		MBUF_SET_BIT(pg->hdr.card, buf1->hdr.chunkno);

		*handle = MBUF_MAKE_HANDLE(buf1->hdr.pageno, buf1->hdr.chunkno);
		*phys = pg->hdr.phys + buf1->hdr.chunkno * MBUF1_CHUNK +
		    MBUF1_OFFSET;

	} else
		return (-1);

	/* make the chunk visible to the device before handing it over */
	bus_dmamap_sync(sc->mbuf_tag, pg->hdr.map, BUS_DMASYNC_PREREAD);

	return (0);
}

/*
 * DMA loader callback for a large receive mbuf: store the single 32-bit
 * segment address into the uint32_t pointed to by 'arg'.
 */
static void
hatm_mbuf_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	uint32_t *ptr = (uint32_t *)arg;

	if (nsegs == 0) {
		printf("%s: error=%d\n", __func__, error);
		return;
	}
	KASSERT(nsegs == 1, ("too many segments for mbuf: %d", nsegs));
	KASSERT(segs[0].ds_addr <= 0xffffffffLU,
	    ("phys addr too large %lx", (u_long)segs[0].ds_addr));

	*ptr =
segs[0].ds_addr; 311} 312 313/* 314 * Receive buffer pool interrupt. This means the number of entries in the 315 * queue has dropped below the threshold. Try to supply new buffers. 316 */ 317static void 318he_intr_rbp(struct hatm_softc *sc, struct herbp *rbp, u_int large, 319 u_int group) 320{ 321 u_int ntail; 322 struct mbuf *m; 323 int error; 324 325 DBG(sc, INTR, ("%s buffer supply threshold crossed for group %u", 326 large ? "large" : "small", group)); 327 328 rbp->head = (READ4(sc, HE_REGO_RBP_S(large, group)) >> HE_REGS_RBP_HEAD) 329 & (rbp->size - 1); 330 331 for (;;) { 332 if ((ntail = rbp->tail + 1) == rbp->size) 333 ntail = 0; 334 if (ntail == rbp->head) 335 break; 336 337 if (large) { 338 /* allocate the MBUF */ 339 if ((m = m_getcl(M_DONTWAIT, MT_DATA, 340 M_PKTHDR)) == NULL) { 341 if_printf(&sc->ifatm.ifnet, 342 "no mbuf clusters\n"); 343 break; 344 } 345 m->m_data += MBUFL_OFFSET; 346 347 if (sc->lbufs[sc->lbufs_next] != NULL) 348 panic("hatm: lbufs full %u", sc->lbufs_next); 349 sc->lbufs[sc->lbufs_next] = m; 350 351 if ((error = bus_dmamap_load(sc->mbuf_tag, 352 sc->rmaps[sc->lbufs_next], 353 m->m_data, rbp->bsize, hatm_mbuf_helper, 354 &rbp->rbp[rbp->tail].phys, BUS_DMA_NOWAIT)) != NULL) 355 panic("hatm: mbuf mapping failed %d", error); 356 357 bus_dmamap_sync(sc->mbuf_tag, 358 sc->rmaps[sc->lbufs_next], 359 BUS_DMASYNC_PREREAD); 360 361 rbp->rbp[rbp->tail].handle = sc->lbufs_next | 362 MBUF_LARGE_FLAG; 363 364 if (++sc->lbufs_next == sc->lbufs_size) 365 sc->lbufs_next = 0; 366 367 } else { 368 m = NULL; 369 if (hatm_mbuf_alloc(sc, group, 370 &rbp->rbp[rbp->tail].phys, 371 &rbp->rbp[rbp->tail].handle)) { 372 m_freem(m); 373 break; 374 } 375 } 376 DBG(sc, DMA, ("MBUF loaded: handle=%x m=%p phys=%x", 377 rbp->rbp[rbp->tail].handle, m, rbp->rbp[rbp->tail].phys)); 378 rbp->rbp[rbp->tail].handle <<= HE_REGS_RBRQ_ADDR; 379 380 rbp->tail = ntail; 381 } 382 WRITE4(sc, HE_REGO_RBP_T(large, group), 383 (rbp->tail << HE_REGS_RBP_TAIL)); 384} 385 386/* 387 * 
Extract the buffer and hand it to the receive routine
 */
static struct mbuf *
hatm_rx_buffer(struct hatm_softc *sc, u_int group, u_int handle)
{
	u_int pageno;
	u_int chunkno;
	struct mbuf *m;

	if (handle & MBUF_LARGE_FLAG) {
		/* large buffer - sync and unload */
		handle &= ~MBUF_LARGE_FLAG;
		DBG(sc, RX, ("RX large handle=%x", handle));

		bus_dmamap_sync(sc->mbuf_tag, sc->rmaps[handle],
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mbuf_tag, sc->rmaps[handle]);

		/* hand ownership of the preloaded mbuf back to the caller */
		m = sc->lbufs[handle];
		sc->lbufs[handle] = NULL;

		return (m);
	}

	/* small buffer - decode page/chunk and take it back from the card */
	MBUF_PARSE_HANDLE(handle, pageno, chunkno);
	MBUF_CLR_BIT(sc->mbuf_pages[pageno]->hdr.card, chunkno);

	DBG(sc, RX, ("RX group=%u handle=%x page=%u chunk=%u", group, handle,
	    pageno, chunkno));

	MGETHDR(m, M_DONTWAIT, MT_DATA);

	if (group == 0) {
		struct mbuf0_chunk *c0;

		c0 = (struct mbuf0_chunk *)sc->mbuf_pages[pageno] + chunkno;
		KASSERT(c0->hdr.pageno == pageno, ("pageno = %u/%u",
		    c0->hdr.pageno, pageno));
		KASSERT(c0->hdr.chunkno == chunkno, ("chunkno = %u/%u",
		    c0->hdr.chunkno, chunkno));

		if (m != NULL) {
			/* attach the chunk as external storage; freed back
			 * to the pool via hatm_mbuf0_free */
			m->m_ext.ref_cnt = &c0->hdr.ref_cnt;
			m_extadd(m, (void *)c0, MBUF0_SIZE,
			    hatm_mbuf0_free, sc, M_PKTHDR, EXT_EXTREF);
			m->m_data += MBUF0_OFFSET;
		} else
			/* no mbuf header - return the chunk to the pool */
			hatm_mbuf0_free(c0, sc);

	} else {
		struct mbuf1_chunk *c1;

		c1 = (struct mbuf1_chunk *)sc->mbuf_pages[pageno] + chunkno;
		KASSERT(c1->hdr.pageno == pageno, ("pageno = %u/%u",
		    c1->hdr.pageno, pageno));
		KASSERT(c1->hdr.chunkno == chunkno, ("chunkno = %u/%u",
		    c1->hdr.chunkno, chunkno));

		if (m != NULL) {
			m->m_ext.ref_cnt = &c1->hdr.ref_cnt;
			m_extadd(m, (void *)c1, MBUF1_SIZE,
			    hatm_mbuf1_free, sc, M_PKTHDR, EXT_EXTREF);
			m->m_data += MBUF1_OFFSET;
		} else
			hatm_mbuf1_free(c1, sc);
	}

	return (m);
}

/*
 * Interrupt because of receive buffer returned.
 */
static void
he_intr_rbrq(struct hatm_softc *sc, struct herbrq *rq, u_int group)
{
	struct he_rbrqen *e;
	uint32_t flags, tail;
	u_int cid, len;
	struct mbuf *m;

	for (;;) {
		/* tail is maintained by the card in host status memory */
		tail = sc->hsp->group[group].rbrq_tail >> 3;

		if (rq->head == tail)
			break;

		e = &rq->rbrq[rq->head];

		flags = e->addr & HE_REGM_RBRQ_FLAGS;
		if (!(flags & HE_REGM_RBRQ_HBUF_ERROR))
			m = hatm_rx_buffer(sc, group,
			    (e->addr & HE_REGM_RBRQ_ADDR) >> HE_REGS_RBRQ_ADDR);
		else
			m = NULL;

		cid = (e->len & HE_REGM_RBRQ_CID) >> HE_REGS_RBRQ_CID;
		/* length field is in units of 4 bytes */
		len = 4 * (e->len & HE_REGM_RBRQ_LEN);

		/* hand the buffer (possibly NULL on error) upstream */
		hatm_rx(sc, cid, flags, m, len);

		if (++rq->head == rq->size)
			rq->head = 0;
	}
	WRITE4(sc, HE_REGO_RBRQ_H(group), rq->head << 3);
}

/*
 * Main interrupt handler.  Drains the interrupt queue and dispatches
 * each interrupt status word (ISW) to the appropriate queue handler.
 * Contains workarounds for the documented hardware bugs 8.1.1 (stale
 * tail pointer / invalid ISW) and 8.1.2 (back-to-back register access).
 */
void
hatm_intr(void *p)
{
	struct heirq *q = p;
	struct hatm_softc *sc = q->sc;
	u_int status;
	u_int tail;

	/* if we have a stray interrupt with a non-initialized card,
	 * we cannot even lock before looking at the flag */
	if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING))
		return;

	mtx_lock(&sc->mtx);
	(void)READ4(sc, HE_REGO_INT_FIFO);

	tail = *q->tailp;
	if (q->head == tail) {
		/* workaround for tail pointer not updated bug (8.1.1) */
		DBG(sc, INTR, ("hatm: intr tailq not updated bug triggered"));

		/* read the tail pointer from the card */
		tail = READ4(sc, HE_REGO_IRQ_BASE(q->group)) &
		    HE_REGM_IRQ_BASE_TAIL;
		BARRIER_R(sc);

		sc->istats.bug_no_irq_upd++;
	}

	/* clear the interrupt */
	WRITE4(sc, HE_REGO_INT_FIFO, HE_REGM_INT_FIFO_CLRA);
	BARRIER_W(sc);

	while (q->head != tail) {
		status = q->irq[q->head];
		/* poison the consumed slot so a stale ISW is detectable */
		q->irq[q->head] = HE_REGM_ITYPE_INVALID;
		/* note: queue wraps at size - 1, matching the size
		 * programmed into HE_REGO_IRQ_HEAD below */
		if (++q->head == (q->size - 1))
			q->head = 0;

		switch (status & HE_REGM_ITYPE) {

		  case HE_REGM_ITYPE_TBRQ:
			DBG(sc, INTR, ("TBRQ treshold %u", status & HE_REGM_IGROUP));
			sc->istats.itype_tbrq++;
			he_intr_tbrq(sc, &sc->tbrq, status & HE_REGM_IGROUP);
			break;

		  case HE_REGM_ITYPE_TPD:
			DBG(sc, INTR, ("TPD ready %u", status & HE_REGM_IGROUP));
			sc->istats.itype_tpd++;
			he_intr_tbrq(sc, &sc->tbrq, status & HE_REGM_IGROUP);
			break;

		  case HE_REGM_ITYPE_RBPS:
			sc->istats.itype_rbps++;
			switch (status & HE_REGM_IGROUP) {

			  case 0:
				he_intr_rbp(sc, &sc->rbp_s0, 0, 0);
				break;

			  case 1:
				he_intr_rbp(sc, &sc->rbp_s1, 0, 1);
				break;

			  default:
				if_printf(&sc->ifatm.ifnet, "bad INTR RBPS%u\n",
				    status & HE_REGM_IGROUP);
				break;
			}
			break;

		  case HE_REGM_ITYPE_RBPL:
			sc->istats.itype_rbpl++;
			switch (status & HE_REGM_IGROUP) {

			  case 0:
				he_intr_rbp(sc, &sc->rbp_l0, 1, 0);
				break;

			  default:
				if_printf(&sc->ifatm.ifnet, "bad INTR RBPL%u\n",
				    status & HE_REGM_IGROUP);
				break;
			}
			break;

		  case HE_REGM_ITYPE_RBRQ:
			DBG(sc, INTR, ("INTERRUPT RBRQ %u", status & HE_REGM_IGROUP));
			sc->istats.itype_rbrq++;
			switch (status & HE_REGM_IGROUP) {

			  case 0:
				he_intr_rbrq(sc, &sc->rbrq_0, 0);
				break;

			  case 1:
				/* group 1 is only valid when configured */
				if (sc->rbrq_1.size > 0) {
					he_intr_rbrq(sc, &sc->rbrq_1, 1);
					break;
				}
				/* FALLTHRU */

			  default:
				if_printf(&sc->ifatm.ifnet, "bad INTR RBRQ%u\n",
				    status & HE_REGM_IGROUP);
				break;
			}
			break;

		  case HE_REGM_ITYPE_RBRQT:
			DBG(sc, INTR, ("INTERRUPT RBRQT %u", status & HE_REGM_IGROUP));
			sc->istats.itype_rbrqt++;
			switch (status & HE_REGM_IGROUP) {

			  case 0:
				he_intr_rbrq(sc, &sc->rbrq_0, 0);
				break;

			  case 1:
				if (sc->rbrq_1.size > 0) {
					he_intr_rbrq(sc, &sc->rbrq_1, 1);
					break;
				}
				/* FALLTHRU */

			  default:
				if_printf(&sc->ifatm.ifnet, "bad INTR RBRQT%u\n",
				    status & HE_REGM_IGROUP);
				break;
			}
			break;

		  case HE_REGM_ITYPE_PHYS:
			sc->istats.itype_phys++;
			utopia_intr(&sc->utopia);
			break;

#if HE_REGM_ITYPE_UNKNOWN != HE_REGM_ITYPE_INVALID
		  case HE_REGM_ITYPE_UNKNOWN:
			sc->istats.itype_unknown++;
			if_printf(&sc->ifatm.ifnet, "bad interrupt\n");
			break;
#endif

		  case HE_REGM_ITYPE_ERR:
			sc->istats.itype_err++;
			switch (status) {

			  case HE_REGM_ITYPE_PERR:
				if_printf(&sc->ifatm.ifnet, "parity error\n");
				break;

			  case HE_REGM_ITYPE_ABORT:
				if_printf(&sc->ifatm.ifnet, "abort interrupt "
				    "addr=0x%08x\n",
				    READ4(sc, HE_REGO_ABORT_ADDR));
				break;

			  default:
				if_printf(&sc->ifatm.ifnet,
				    "bad interrupt type %08x\n", status);
				break;
			}
			break;

		  case HE_REGM_ITYPE_INVALID:
			/* this is the documented fix for the ISW bug 8.1.1
			 * Note, that the documented fix is partly wrong:
			 * the ISWs should be initialized to 0xf8 not 0xff */
			sc->istats.bug_bad_isw++;
			DBG(sc, INTR, ("hatm: invalid ISW bug triggered"));
			/* the real cause is unknown - service everything */
			he_intr_tbrq(sc, &sc->tbrq, 0);
			he_intr_rbp(sc, &sc->rbp_s0, 0, 0);
			he_intr_rbp(sc, &sc->rbp_l0, 1, 0);
			he_intr_rbp(sc, &sc->rbp_s1, 0, 1);
			he_intr_rbrq(sc, &sc->rbrq_0, 0);
			he_intr_rbrq(sc, &sc->rbrq_1, 1);
			utopia_intr(&sc->utopia);
			break;

		  default:
			if_printf(&sc->ifatm.ifnet, "bad interrupt type %08x\n",
			    status);
			break;
		}
	}

	/* write back head to clear queue */
	WRITE4(sc, HE_REGO_IRQ_HEAD(0),
	    ((q->size - 1) << HE_REGS_IRQ_HEAD_SIZE) |
	    (q->thresh << HE_REGS_IRQ_HEAD_THRESH) |
	    (q->head << HE_REGS_IRQ_HEAD_HEAD));
	BARRIER_W(sc);

	/* workaround the back-to-back irq access problem (8.1.2) */
	(void)READ4(sc, HE_REGO_INT_FIFO);
	BARRIER_R(sc);

	mtx_unlock(&sc->mtx);
}