/*-
 * Copyright (c) 2001-2003
 *	Fraunhofer Institute for Open Communication Systems (FhG Fokus).
 *	All rights reserved.
 * Author: Hartmut Brandt <harti@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/hatm/if_hatm_intr.c 175872 2008-02-01 19:36:27Z phk $");

/*
 * ForeHE driver.
 *
 * Interrupt handler.
 */

#include "opt_inet.h"
#include "opt_natm.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/errno.h>
#include <sys/conf.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>
#include <vm/uma.h>

#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/if_atm.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/if_atm.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/utopia/utopia.h>
#include <dev/hatm/if_hatmconf.h>
#include <dev/hatm/if_hatmreg.h>
#include <dev/hatm/if_hatmvar.h>

CTASSERT(sizeof(struct mbuf_page) == MBUF_ALLOC_SIZE);
CTASSERT(sizeof(struct mbuf0_chunk) == MBUF0_CHUNK);
CTASSERT(sizeof(struct mbuf1_chunk) == MBUF1_CHUNK);
CTASSERT(sizeof(((struct mbuf0_chunk *)NULL)->storage) >= MBUF0_SIZE);
CTASSERT(sizeof(((struct mbuf1_chunk *)NULL)->storage) >= MBUF1_SIZE);
CTASSERT(sizeof(struct tpd) <= HE_TPD_SIZE);

CTASSERT(MBUF0_PER_PAGE <= 256);
CTASSERT(MBUF1_PER_PAGE <= 256);

static void hatm_mbuf_page_alloc(struct hatm_softc *sc, u_int group);
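
/*
 * Storage for the two small receive buffer pools is carved out of
 * malloc'ed pages of MBUF_ALLOC_SIZE bytes that are mapped for DMA as
 * a whole. Each chunk records its (pageno, chunkno) pair in its
 * header; this pair is encoded into the buffer handle passed to the
 * card (MBUF_MAKE_HANDLE) and decoded again when the buffer is
 * returned (MBUF_PARSE_HANDLE). The MBUF*_PER_PAGE <= 256 asserts
 * above bound the chunk number, presumably so that it fits into the
 * handle encoding. Free chunks are kept on per-pool lock-free lists
 * headed at sc->mbuf_list[].
 */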

/*
 * Free an external mbuf to a list. We use atomic functions so that
 * we don't need a mutex for the list.
 *
 * Note that in general this algorithm is not safe when multiple readers
 * and writers are present. To cite from a mail from David Schultz
 * <das@freebsd.org>:
 *
 * It looks like this is subject to the ABA problem. For instance,
 * suppose X, Y, and Z are the top things on the freelist and a
 * thread attempts to make an allocation. You set buf to X and load
 * buf->link (Y) into a register. Then the thread gets preempted, and
 * another thread allocates both X and Y, then frees X. When the
 * original thread gets the CPU again, X is still on top of the
 * freelist, so the atomic operation succeeds. However, the atomic
 * op places Y on top of the freelist, even though Y is no longer
 * free.
 *
 * We are, however, sure that we have only one thread that ever allocates
 * buffers because the only place we're called from is the interrupt handler.
 * Under these circumstances the code looks safe.
 */
void
hatm_ext_free(struct mbufx_free **list, struct mbufx_free *buf)
{
	for (;;) {
		buf->link = *list;
		if (atomic_cmpset_ptr((uintptr_t *)list, (uintptr_t)buf->link,
		    (uintptr_t)buf))
			break;
	}
}

static __inline struct mbufx_free *
hatm_ext_alloc(struct hatm_softc *sc, u_int g)
{
	struct mbufx_free *buf;

	for (;;) {
		if ((buf = sc->mbuf_list[g]) == NULL)
			break;
		if (atomic_cmpset_ptr((uintptr_t *)&sc->mbuf_list[g],
		    (uintptr_t)buf, (uintptr_t)buf->link))
			break;
	}
	if (buf == NULL) {
		hatm_mbuf_page_alloc(sc, g);
		for (;;) {
			if ((buf = sc->mbuf_list[g]) == NULL)
				break;
			if (atomic_cmpset_ptr((uintptr_t *)&sc->mbuf_list[g],
			    (uintptr_t)buf, (uintptr_t)buf->link))
				break;
		}
	}
	return (buf);
}
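
/*
 * To illustrate (a sketch of the interleaving described above, not
 * driver code):
 *
 *	thread A: buf = *list;		(X on top, buf->link == Y)
 *	thread B: allocates X, allocates Y, frees X	(X on top again)
 *	thread A: atomic_cmpset_ptr(list, X, Y) succeeds, but places
 *		  the in-use Y back on top of the freelist
 *
 * The push side (hatm_ext_free()) is a plain CAS push and is safe with
 * multiple concurrent writers; only the pop in hatm_ext_alloc() relies
 * on the single-consumer (interrupt handler) assumption.
 */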

/*
 * Either the queue threshold was crossed or a TPD with the INTR bit set
 * was transmitted.
 */
static void
he_intr_tbrq(struct hatm_softc *sc, struct hetbrq *q, u_int group)
{
	uint32_t *tailp = &sc->hsp->group[group].tbrq_tail;
	u_int no;

	while (q->head != (*tailp >> 2)) {
		no = (q->tbrq[q->head].addr & HE_REGM_TBRQ_ADDR) >>
		    HE_REGS_TPD_ADDR;
		hatm_tx_complete(sc, TPD_ADDR(sc, no),
		    (q->tbrq[q->head].addr & HE_REGM_TBRQ_FLAGS));

		if (++q->head == q->size)
			q->head = 0;
	}
	WRITE4(sc, HE_REGO_TBRQ_H(group), q->head << 2);
}

/*
 * DMA loader function for external mbuf page.
 */
static void
hatm_extbuf_helper(void *arg, bus_dma_segment_t *segs, int nsegs,
    int error)
{
	if (error) {
		printf("%s: mapping error %d\n", __func__, error);
		return;
	}
	KASSERT(nsegs == 1,
	    ("too many segments for DMA: %d", nsegs));
	KASSERT(segs[0].ds_addr <= 0xffffffffLU,
	    ("phys addr too large %lx", (u_long)segs[0].ds_addr));

	*(uint32_t *)arg = segs[0].ds_addr;
}

/*
 * Allocate a page of external mbuf storage for the small pools.
 * Create a DMA map and load it. Put all the chunks onto the right
 * free list.
 */
static void
hatm_mbuf_page_alloc(struct hatm_softc *sc, u_int group)
{
	struct mbuf_page *pg;
	int err;
	u_int i;

	if (sc->mbuf_npages == sc->mbuf_max_pages)
		return;
	if ((pg = malloc(MBUF_ALLOC_SIZE, M_DEVBUF, M_NOWAIT)) == NULL)
		return;

	err = bus_dmamap_create(sc->mbuf_tag, 0, &pg->hdr.map);
	if (err != 0) {
		if_printf(sc->ifp, "%s -- bus_dmamap_create: %d\n",
		    __func__, err);
		free(pg, M_DEVBUF);
		return;
	}
	err = bus_dmamap_load(sc->mbuf_tag, pg->hdr.map, pg, MBUF_ALLOC_SIZE,
	    hatm_extbuf_helper, &pg->hdr.phys, BUS_DMA_NOWAIT);
	if (err != 0) {
		if_printf(sc->ifp, "%s -- mbuf mapping failed %d\n",
		    __func__, err);
		bus_dmamap_destroy(sc->mbuf_tag, pg->hdr.map);
		free(pg, M_DEVBUF);
		return;
	}

	sc->mbuf_pages[sc->mbuf_npages] = pg;

	if (group == 0) {
		struct mbuf0_chunk *c;

		pg->hdr.pool = 0;
		pg->hdr.nchunks = MBUF0_PER_PAGE;
		pg->hdr.chunksize = MBUF0_CHUNK;
		pg->hdr.hdroff = sizeof(c->storage);
		c = (struct mbuf0_chunk *)pg;
		for (i = 0; i < MBUF0_PER_PAGE; i++, c++) {
			c->hdr.pageno = sc->mbuf_npages;
			c->hdr.chunkno = i;
			c->hdr.flags = 0;
			hatm_ext_free(&sc->mbuf_list[0],
			    (struct mbufx_free *)c);
		}
	} else {
		struct mbuf1_chunk *c;

		pg->hdr.pool = 1;
		pg->hdr.nchunks = MBUF1_PER_PAGE;
		pg->hdr.chunksize = MBUF1_CHUNK;
		pg->hdr.hdroff = sizeof(c->storage);
		c = (struct mbuf1_chunk *)pg;
		for (i = 0; i < MBUF1_PER_PAGE; i++, c++) {
			c->hdr.pageno = sc->mbuf_npages;
			c->hdr.chunkno = i;
			c->hdr.flags = 0;
			hatm_ext_free(&sc->mbuf_list[1],
			    (struct mbufx_free *)c);
		}
	}
	sc->mbuf_npages++;
}

/*
 * Free an mbuf and put it onto the free list.
 */
static void
hatm_mbuf0_free(void *buf, void *args)
{
	struct hatm_softc *sc = args;
	struct mbuf0_chunk *c = buf;

	KASSERT((c->hdr.flags & (MBUF_USED | MBUF_CARD)) == MBUF_USED,
	    ("freeing unused mbuf %x", c->hdr.flags));
	c->hdr.flags &= ~MBUF_USED;
	hatm_ext_free(&sc->mbuf_list[0], (struct mbufx_free *)c);
}
static void
hatm_mbuf1_free(void *buf, void *args)
{
	struct hatm_softc *sc = args;
	struct mbuf1_chunk *c = buf;

	KASSERT((c->hdr.flags & (MBUF_USED | MBUF_CARD)) == MBUF_USED,
	    ("freeing unused mbuf %x", c->hdr.flags));
	c->hdr.flags &= ~MBUF_USED;
	hatm_ext_free(&sc->mbuf_list[1], (struct mbufx_free *)c);
}

static void
hatm_mbuf_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	uint32_t *ptr = (uint32_t *)arg;

	if (nsegs == 0) {
		printf("%s: error=%d\n", __func__, error);
		return;
	}
	KASSERT(nsegs == 1, ("too many segments for mbuf: %d", nsegs));
	KASSERT(segs[0].ds_addr <= 0xffffffffLU,
	    ("phys addr too large %lx", (u_long)segs[0].ds_addr));

	*ptr = segs[0].ds_addr;
}

/*
 * Receive buffer pool interrupt. This means the number of entries in the
 * queue has dropped below the threshold. Try to supply new buffers.
 */
static void
he_intr_rbp(struct hatm_softc *sc, struct herbp *rbp, u_int large,
    u_int group)
{
	u_int ntail;
	struct mbuf *m;
	int error;
	struct mbufx_free *cf;
	struct mbuf_page *pg;
	struct mbuf0_chunk *buf0;
	struct mbuf1_chunk *buf1;

	DBG(sc, INTR, ("%s buffer supply threshold crossed for group %u",
	    large ? "large" : "small", group));

	rbp->head = (READ4(sc, HE_REGO_RBP_S(large, group)) >> HE_REGS_RBP_HEAD)
	    & (rbp->size - 1);

	for (;;) {
		if ((ntail = rbp->tail + 1) == rbp->size)
			ntail = 0;
		if (ntail == rbp->head)
			break;
		m = NULL;

		if (large) {
			/* allocate the MBUF */
			if ((m = m_getcl(M_DONTWAIT, MT_DATA,
			    M_PKTHDR)) == NULL) {
				if_printf(sc->ifp,
				    "no mbuf clusters\n");
				break;
			}
			m->m_data += MBUFL_OFFSET;

			if (sc->lbufs[sc->lbufs_next] != NULL)
				panic("hatm: lbufs full %u", sc->lbufs_next);
			sc->lbufs[sc->lbufs_next] = m;

			if ((error = bus_dmamap_load(sc->mbuf_tag,
			    sc->rmaps[sc->lbufs_next],
			    m->m_data, rbp->bsize, hatm_mbuf_helper,
			    &rbp->rbp[rbp->tail].phys, BUS_DMA_NOWAIT)) != 0)
				panic("hatm: mbuf mapping failed %d", error);

			bus_dmamap_sync(sc->mbuf_tag,
			    sc->rmaps[sc->lbufs_next],
			    BUS_DMASYNC_PREREAD);

			rbp->rbp[rbp->tail].handle =
			    MBUF_MAKE_LHANDLE(sc->lbufs_next);

			if (++sc->lbufs_next == sc->lbufs_size)
				sc->lbufs_next = 0;

		} else if (group == 0) {
			/*
			 * Allocate small buffer in group 0
			 */
			if ((cf = hatm_ext_alloc(sc, 0)) == NULL)
				break;
			buf0 = (struct mbuf0_chunk *)cf;
			pg = sc->mbuf_pages[buf0->hdr.pageno];
			buf0->hdr.flags |= MBUF_CARD;
			rbp->rbp[rbp->tail].phys = pg->hdr.phys +
			    buf0->hdr.chunkno * MBUF0_CHUNK + MBUF0_OFFSET;
			rbp->rbp[rbp->tail].handle =
			    MBUF_MAKE_HANDLE(buf0->hdr.pageno,
			    buf0->hdr.chunkno);

			bus_dmamap_sync(sc->mbuf_tag, pg->hdr.map,
			    BUS_DMASYNC_PREREAD);

		} else if (group == 1) {
			/*
			 * Allocate small buffer in group 1
			 */
			if ((cf = hatm_ext_alloc(sc, 1)) == NULL)
				break;
			buf1 = (struct mbuf1_chunk *)cf;
			pg = sc->mbuf_pages[buf1->hdr.pageno];
			buf1->hdr.flags |= MBUF_CARD;
			rbp->rbp[rbp->tail].phys = pg->hdr.phys +
			    buf1->hdr.chunkno * MBUF1_CHUNK + MBUF1_OFFSET;
			rbp->rbp[rbp->tail].handle =
			    MBUF_MAKE_HANDLE(buf1->hdr.pageno,
			    buf1->hdr.chunkno);

			bus_dmamap_sync(sc->mbuf_tag, pg->hdr.map,
			    BUS_DMASYNC_PREREAD);

		} else
			/* oops */
			break;

		DBG(sc, DMA, ("MBUF loaded: handle=%x m=%p phys=%x",
		    rbp->rbp[rbp->tail].handle, m, rbp->rbp[rbp->tail].phys));

		rbp->tail = ntail;
	}
	WRITE4(sc, HE_REGO_RBP_T(large, group),
	    (rbp->tail << HE_REGS_RBP_TAIL));
}

/*
 * Extract the buffer and hand it to the receive routine
 */
static struct mbuf *
hatm_rx_buffer(struct hatm_softc *sc, u_int group, u_int handle)
{
	u_int pageno;
	u_int chunkno;
	struct mbuf *m;

	if (handle & MBUF_LARGE_FLAG) {
		/* large buffer - sync and unload */
		MBUF_PARSE_LHANDLE(handle, handle);
		DBG(sc, RX, ("RX large handle=%x", handle));

		bus_dmamap_sync(sc->mbuf_tag, sc->rmaps[handle],
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mbuf_tag, sc->rmaps[handle]);

		m = sc->lbufs[handle];
		sc->lbufs[handle] = NULL;

		return (m);
	}

	MBUF_PARSE_HANDLE(handle, pageno, chunkno);

	DBG(sc, RX, ("RX group=%u handle=%x page=%u chunk=%u", group, handle,
	    pageno, chunkno));

	MGETHDR(m, M_DONTWAIT, MT_DATA);

	if (group == 0) {
		struct mbuf0_chunk *c0;

		c0 = (struct mbuf0_chunk *)sc->mbuf_pages[pageno] + chunkno;
		KASSERT(c0->hdr.pageno == pageno, ("pageno = %u/%u",
		    c0->hdr.pageno, pageno));
		KASSERT(c0->hdr.chunkno == chunkno, ("chunkno = %u/%u",
		    c0->hdr.chunkno, chunkno));
		KASSERT(c0->hdr.flags & MBUF_CARD, ("mbuf not on card %u/%u",
		    pageno, chunkno));
		KASSERT(!(c0->hdr.flags & MBUF_USED), ("used mbuf %u/%u",
		    pageno, chunkno));

		c0->hdr.flags |= MBUF_USED;
		c0->hdr.flags &= ~MBUF_CARD;

		if (m != NULL) {
			m->m_ext.ref_cnt = &c0->hdr.ref_cnt;
			MEXTADD(m, (void *)c0, MBUF0_SIZE,
			    hatm_mbuf0_free, c0, sc, M_PKTHDR, EXT_EXTREF);
			m->m_data += MBUF0_OFFSET;
		} else
			hatm_mbuf0_free(c0, sc);

	} else {
		struct mbuf1_chunk *c1;

		c1 = (struct mbuf1_chunk *)sc->mbuf_pages[pageno] + chunkno;
		KASSERT(c1->hdr.pageno == pageno, ("pageno = %u/%u",
		    c1->hdr.pageno, pageno));
		KASSERT(c1->hdr.chunkno == chunkno, ("chunkno = %u/%u",
		    c1->hdr.chunkno, chunkno));
		KASSERT(c1->hdr.flags & MBUF_CARD, ("mbuf not on card %u/%u",
		    pageno, chunkno));
		KASSERT(!(c1->hdr.flags & MBUF_USED), ("used mbuf %u/%u",
		    pageno, chunkno));

		c1->hdr.flags |= MBUF_USED;
		c1->hdr.flags &= ~MBUF_CARD;

		if (m != NULL) {
			m->m_ext.ref_cnt = &c1->hdr.ref_cnt;
			MEXTADD(m, (void *)c1, MBUF1_SIZE,
			    hatm_mbuf1_free, c1, sc, M_PKTHDR, EXT_EXTREF);
			m->m_data += MBUF1_OFFSET;
		} else
			hatm_mbuf1_free(c1, sc);
	}

	return (m);
}

/*
 * Interrupt because of receive buffer returned.
 */
static void
he_intr_rbrq(struct hatm_softc *sc, struct herbrq *rq, u_int group)
{
	struct he_rbrqen *e;
	uint32_t flags, tail;
	u_int cid, len;
	struct mbuf *m;

	for (;;) {
		tail = sc->hsp->group[group].rbrq_tail >> 3;

		if (rq->head == tail)
			break;

		e = &rq->rbrq[rq->head];

		flags = e->addr & HE_REGM_RBRQ_FLAGS;
		if (!(flags & HE_REGM_RBRQ_HBUF_ERROR))
			m = hatm_rx_buffer(sc, group, e->addr);
		else
			m = NULL;

		cid = (e->len & HE_REGM_RBRQ_CID) >> HE_REGS_RBRQ_CID;
		len = 4 * (e->len & HE_REGM_RBRQ_LEN);

		hatm_rx(sc, cid, flags, m, len);

		if (++rq->head == rq->size)
			rq->head = 0;
	}
	WRITE4(sc, HE_REGO_RBRQ_H(group), rq->head << 3);
}
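
/*
 * Interrupt entry point. The argument points to one of the card's
 * interrupt queues; drain that queue and dispatch on the type of each
 * interrupt status word. The workarounds for the tail-pointer-update
 * and invalid-ISW hardware bugs (8.1.1) and the back-to-back register
 * access problem (8.1.2) are applied inline below.
 */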
void
hatm_intr(void *p)
{
	struct heirq *q = p;
	struct hatm_softc *sc = q->sc;
	u_int status;
	u_int tail;

	/* if we have a stray interrupt with a non-initialized card,
	 * we cannot even lock before looking at the flag */
	if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	mtx_lock(&sc->mtx);
	(void)READ4(sc, HE_REGO_INT_FIFO);

	tail = *q->tailp;
	if (q->head == tail) {
		/* workaround for tail pointer not updated bug (8.1.1) */
		DBG(sc, INTR, ("hatm: intr tailq not updated bug triggered"));

		/* read the tail pointer from the card */
		tail = READ4(sc, HE_REGO_IRQ_BASE(q->group)) &
		    HE_REGM_IRQ_BASE_TAIL;
		BARRIER_R(sc);

		sc->istats.bug_no_irq_upd++;
	}

	/* clear the interrupt */
	WRITE4(sc, HE_REGO_INT_FIFO, HE_REGM_INT_FIFO_CLRA);
	BARRIER_W(sc);

	while (q->head != tail) {
		status = q->irq[q->head];
		q->irq[q->head] = HE_REGM_ITYPE_INVALID;
		if (++q->head == (q->size - 1))
			q->head = 0;

		switch (status & HE_REGM_ITYPE) {

		case HE_REGM_ITYPE_TBRQ:
			DBG(sc, INTR, ("TBRQ threshold %u", status & HE_REGM_IGROUP));
			sc->istats.itype_tbrq++;
			he_intr_tbrq(sc, &sc->tbrq, status & HE_REGM_IGROUP);
			break;

		case HE_REGM_ITYPE_TPD:
			DBG(sc, INTR, ("TPD ready %u", status & HE_REGM_IGROUP));
			sc->istats.itype_tpd++;
			he_intr_tbrq(sc, &sc->tbrq, status & HE_REGM_IGROUP);
			break;

		case HE_REGM_ITYPE_RBPS:
			sc->istats.itype_rbps++;
			switch (status & HE_REGM_IGROUP) {

			case 0:
				he_intr_rbp(sc, &sc->rbp_s0, 0, 0);
				break;

			case 1:
				he_intr_rbp(sc, &sc->rbp_s1, 0, 1);
				break;

			default:
				if_printf(sc->ifp, "bad INTR RBPS%u\n",
				    status & HE_REGM_IGROUP);
				break;
			}
			break;

		case HE_REGM_ITYPE_RBPL:
			sc->istats.itype_rbpl++;
			switch (status & HE_REGM_IGROUP) {

			case 0:
				he_intr_rbp(sc, &sc->rbp_l0, 1, 0);
				break;

			default:
				if_printf(sc->ifp, "bad INTR RBPL%u\n",
				    status & HE_REGM_IGROUP);
				break;
			}
			break;

		case HE_REGM_ITYPE_RBRQ:
			DBG(sc, INTR, ("INTERRUPT RBRQ %u", status & HE_REGM_IGROUP));
			sc->istats.itype_rbrq++;
			switch (status & HE_REGM_IGROUP) {

			case 0:
				he_intr_rbrq(sc, &sc->rbrq_0, 0);
				break;

			case 1:
				if (sc->rbrq_1.size > 0) {
					he_intr_rbrq(sc, &sc->rbrq_1, 1);
					break;
				}
				/* FALLTHRU */

			default:
				if_printf(sc->ifp, "bad INTR RBRQ%u\n",
				    status & HE_REGM_IGROUP);
				break;
			}
			break;

		case HE_REGM_ITYPE_RBRQT:
			DBG(sc, INTR, ("INTERRUPT RBRQT %u", status & HE_REGM_IGROUP));
			sc->istats.itype_rbrqt++;
			switch (status & HE_REGM_IGROUP) {

			case 0:
				he_intr_rbrq(sc, &sc->rbrq_0, 0);
				break;

			case 1:
				if (sc->rbrq_1.size > 0) {
					he_intr_rbrq(sc, &sc->rbrq_1, 1);
					break;
				}
				/* FALLTHRU */

			default:
				if_printf(sc->ifp, "bad INTR RBRQT%u\n",
				    status & HE_REGM_IGROUP);
				break;
			}
			break;

		case HE_REGM_ITYPE_PHYS:
			sc->istats.itype_phys++;
			utopia_intr(&sc->utopia);
			break;

#if HE_REGM_ITYPE_UNKNOWN != HE_REGM_ITYPE_INVALID
		case HE_REGM_ITYPE_UNKNOWN:
			sc->istats.itype_unknown++;
			if_printf(sc->ifp, "bad interrupt\n");
			break;
#endif

		case HE_REGM_ITYPE_ERR:
			sc->istats.itype_err++;
			switch (status) {

			case HE_REGM_ITYPE_PERR:
				if_printf(sc->ifp, "parity error\n");
				break;

			case HE_REGM_ITYPE_ABORT:
				if_printf(sc->ifp, "abort interrupt "
				    "addr=0x%08x\n",
				    READ4(sc, HE_REGO_ABORT_ADDR));
				break;

			default:
				if_printf(sc->ifp,
				    "bad interrupt type %08x\n", status);
				break;
			}
			break;

		case HE_REGM_ITYPE_INVALID:
			/* this is the documented fix for the ISW bug 8.1.1.
			 * Note that the documented fix is partly wrong:
			 * the ISWs should be initialized to 0xf8 not 0xff */
			sc->istats.bug_bad_isw++;
			DBG(sc, INTR, ("hatm: invalid ISW bug triggered"));
			he_intr_tbrq(sc, &sc->tbrq, 0);
			he_intr_rbp(sc, &sc->rbp_s0, 0, 0);
			he_intr_rbp(sc, &sc->rbp_l0, 1, 0);
			he_intr_rbp(sc, &sc->rbp_s1, 0, 1);
			he_intr_rbrq(sc, &sc->rbrq_0, 0);
			he_intr_rbrq(sc, &sc->rbrq_1, 1);
			utopia_intr(&sc->utopia);
			break;

		default:
			if_printf(sc->ifp, "bad interrupt type %08x\n",
			    status);
			break;
		}
	}

	/* write back head to clear queue */
	WRITE4(sc, HE_REGO_IRQ_HEAD(0),
	    ((q->size - 1) << HE_REGS_IRQ_HEAD_SIZE) |
	    (q->thresh << HE_REGS_IRQ_HEAD_THRESH) |
	    (q->head << HE_REGS_IRQ_HEAD_HEAD));
	BARRIER_W(sc);

	/* workaround the back-to-back irq access problem (8.1.2) */
	(void)READ4(sc, HE_REGO_INT_FIFO);
	BARRIER_R(sc);

	mtx_unlock(&sc->mtx);
}