/*-
 * Copyright (c) 2001-2003
 *	Fraunhofer Institute for Open Communication Systems (FhG Fokus).
 * 	All rights reserved.
 * Author: Hartmut Brandt <harti@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.0/sys/dev/hatm/if_hatm_intr.c 268530 2014-07-11 14:34:29Z glebius $");

/*
 * ForeHE driver.
34218893Sdim * 35218893Sdim * Interrupt handler. 36218893Sdim */ 37218893Sdim 38218893Sdim#include "opt_inet.h" 39218893Sdim#include "opt_natm.h" 40218893Sdim 41193323Sed#include <sys/types.h> 42223017Sdim#include <sys/param.h> 43223017Sdim#include <sys/systm.h> 44223017Sdim#include <sys/malloc.h> 45223017Sdim#include <sys/kernel.h> 46223017Sdim#include <sys/bus.h> 47223017Sdim#include <sys/errno.h> 48223017Sdim#include <sys/conf.h> 49223017Sdim#include <sys/module.h> 50223017Sdim#include <sys/queue.h> 51193323Sed#include <sys/syslog.h> 52193323Sed#include <sys/condvar.h> 53193323Sed#include <sys/sysctl.h> 54193323Sed#include <vm/uma.h> 55207618Srdivacky 56207618Srdivacky#include <sys/sockio.h> 57207618Srdivacky#include <sys/mbuf.h> 58212904Sdim#include <sys/socket.h> 59212904Sdim 60207618Srdivacky#include <net/if.h> 61207618Srdivacky#include <net/if_var.h> 62207618Srdivacky#include <net/if_media.h> 63207618Srdivacky#include <net/if_atm.h> 64207618Srdivacky#include <net/route.h> 65212904Sdim#include <netinet/in.h> 66212904Sdim#include <netinet/if_atm.h> 67212904Sdim 68212904Sdim#include <machine/bus.h> 69212904Sdim#include <machine/resource.h> 70193323Sed#include <sys/bus.h> 71193323Sed#include <sys/rman.h> 72212904Sdim#include <dev/pci/pcireg.h> 73207618Srdivacky#include <dev/pci/pcivar.h> 74212904Sdim 75207618Srdivacky#include <dev/utopia/utopia.h> 76207618Srdivacky#include <dev/hatm/if_hatmconf.h> 77212904Sdim#include <dev/hatm/if_hatmreg.h> 78212904Sdim#include <dev/hatm/if_hatmvar.h> 79212904Sdim 80212904SdimCTASSERT(sizeof(struct mbuf_page) == MBUF_ALLOC_SIZE); 81212904SdimCTASSERT(sizeof(struct mbuf0_chunk) == MBUF0_CHUNK); 82212904SdimCTASSERT(sizeof(struct mbuf1_chunk) == MBUF1_CHUNK); 83193323SedCTASSERT(sizeof(((struct mbuf0_chunk *)NULL)->storage) >= MBUF0_SIZE); 84193323SedCTASSERT(sizeof(((struct mbuf1_chunk *)NULL)->storage) >= MBUF1_SIZE); 85207618SrdivackyCTASSERT(sizeof(struct tpd) <= HE_TPD_SIZE); 86207618Srdivacky 
87193323SedCTASSERT(MBUF0_PER_PAGE <= 256); 88193323SedCTASSERT(MBUF1_PER_PAGE <= 256); 89193323Sed 90193323Sedstatic void hatm_mbuf_page_alloc(struct hatm_softc *sc, u_int group); 91193323Sed 92223017Sdim/* 93193323Sed * Free an external mbuf to a list. We use atomic functions so that 94193323Sed * we don't need a mutex for the list. 95193323Sed * 96193323Sed * Note that in general this algorithm is not safe when multiple readers 97221345Sdim * and writers are present. To cite from a mail from David Schultz 98221345Sdim * <das@freebsd.org>: 99221345Sdim * 100221345Sdim * It looks like this is subject to the ABA problem. For instance, 101193323Sed * suppose X, Y, and Z are the top things on the freelist and a 102193323Sed * thread attempts to make an allocation. You set buf to X and load 103223017Sdim * buf->link (Y) into a register. Then the thread get preempted, and 104223017Sdim * another thread allocates both X and Y, then frees X. When the 105223017Sdim * original thread gets the CPU again, X is still on top of the 106223017Sdim * freelist, so the atomic operation succeeds. However, the atomic 107223017Sdim * op places Y on top of the freelist, even though Y is no longer 108193323Sed * free. 109193323Sed * 110193323Sed * We are, however sure that we have only one thread that ever allocates 111193323Sed * buffers because the only place we're call from is the interrupt handler. 112198090Srdivacky * Under these circumstances the code looks safe. 
113193323Sed */ 114207618Srdivackyvoid 115207618Srdivackyhatm_ext_free(struct mbufx_free **list, struct mbufx_free *buf) 116212904Sdim{ 117199511Srdivacky for (;;) { 118199511Srdivacky buf->link = *list; 119193323Sed if (atomic_cmpset_ptr((uintptr_t *)list, (uintptr_t)buf->link, 120193323Sed (uintptr_t)buf)) 121193323Sed break; 122193323Sed } 123193323Sed} 124193323Sed 125193323Sedstatic __inline struct mbufx_free * 126193323Sedhatm_ext_alloc(struct hatm_softc *sc, u_int g) 127193323Sed{ 128193323Sed struct mbufx_free *buf; 129193323Sed 130193323Sed for (;;) { 131193323Sed if ((buf = sc->mbuf_list[g]) == NULL) 132193323Sed break; 133204642Srdivacky if (atomic_cmpset_ptr((uintptr_t *)&sc->mbuf_list[g], 134193323Sed (uintptr_t)buf, (uintptr_t)buf->link)) 135193323Sed break; 136193323Sed } 137193323Sed if (buf == NULL) { 138193323Sed hatm_mbuf_page_alloc(sc, g); 139223017Sdim for (;;) { 140201360Srdivacky if ((buf = sc->mbuf_list[g]) == NULL) 141198090Srdivacky break; 142193323Sed if (atomic_cmpset_ptr((uintptr_t *)&sc->mbuf_list[g], 143193323Sed (uintptr_t)buf, (uintptr_t)buf->link)) 144193323Sed break; 145223017Sdim } 146201360Srdivacky } 147198090Srdivacky return (buf); 148193323Sed} 149193323Sed 150193323Sed/* 151193323Sed * Either the queue treshold was crossed or a TPD with the INTR bit set 152207618Srdivacky * was transmitted. 
153223017Sdim */ 154207618Srdivackystatic void 155223017Sdimhe_intr_tbrq(struct hatm_softc *sc, struct hetbrq *q, u_int group) 156223017Sdim{ 157223017Sdim uint32_t *tailp = &sc->hsp->group[group].tbrq_tail; 158207618Srdivacky u_int no; 159207618Srdivacky 160207618Srdivacky while (q->head != (*tailp >> 2)) { 161207618Srdivacky no = (q->tbrq[q->head].addr & HE_REGM_TBRQ_ADDR) >> 162207618Srdivacky HE_REGS_TPD_ADDR; 163193323Sed hatm_tx_complete(sc, TPD_ADDR(sc, no), 164193323Sed (q->tbrq[q->head].addr & HE_REGM_TBRQ_FLAGS)); 165193323Sed 166193323Sed if (++q->head == q->size) 167193323Sed q->head = 0; 168223017Sdim } 169223017Sdim WRITE4(sc, HE_REGO_TBRQ_H(group), q->head << 2); 170203954Srdivacky} 171199481Srdivacky 172199481Srdivacky/* 173193323Sed * DMA loader function for external mbuf page. 174218893Sdim */ 175218893Sdimstatic void 176193323Sedhatm_extbuf_helper(void *arg, bus_dma_segment_t *segs, int nsegs, 177193323Sed int error) 178193323Sed{ 179193323Sed if (error) { 180193323Sed printf("%s: mapping error %d\n", __func__, error); 181193323Sed return; 182193323Sed } 183193323Sed KASSERT(nsegs == 1, 184193323Sed ("too many segments for DMA: %d", nsegs)); 185193323Sed KASSERT(segs[0].ds_addr <= 0xffffffffLU, 186193323Sed ("phys addr too large %lx", (u_long)segs[0].ds_addr)); 187193323Sed 188193323Sed *(uint32_t *)arg = segs[0].ds_addr; 189193323Sed} 190193323Sed 191221345Sdim/* 192193323Sed * Allocate a page of external mbuf storage for the small pools. 193193323Sed * Create a DMA map and load it. Put all the chunks onto the right 194193323Sed * free list. 
195193323Sed */ 196193323Sedstatic void 197223017Sdimhatm_mbuf_page_alloc(struct hatm_softc *sc, u_int group) 198193323Sed{ 199193323Sed struct mbuf_page *pg; 200193323Sed int err; 201193323Sed u_int i; 202193323Sed 203193323Sed if (sc->mbuf_npages == sc->mbuf_max_pages) 204193323Sed return; 205193323Sed if ((pg = malloc(MBUF_ALLOC_SIZE, M_DEVBUF, M_NOWAIT)) == NULL) 206193323Sed return; 207193323Sed 208193323Sed err = bus_dmamap_create(sc->mbuf_tag, 0, &pg->hdr.map); 209193323Sed if (err != 0) { 210193323Sed if_printf(sc->ifp, "%s -- bus_dmamap_create: %d\n", 211203954Srdivacky __func__, err); 212203954Srdivacky free(pg, M_DEVBUF); 213203954Srdivacky return; 214203954Srdivacky } 215207618Srdivacky err = bus_dmamap_load(sc->mbuf_tag, pg->hdr.map, pg, MBUF_ALLOC_SIZE, 216207618Srdivacky hatm_extbuf_helper, &pg->hdr.phys, BUS_DMA_NOWAIT); 217207618Srdivacky if (err != 0) { 218207618Srdivacky if_printf(sc->ifp, "%s -- mbuf mapping failed %d\n", 219207618Srdivacky __func__, err); 220207618Srdivacky bus_dmamap_destroy(sc->mbuf_tag, pg->hdr.map); 221207618Srdivacky free(pg, M_DEVBUF); 222207618Srdivacky return; 223203954Srdivacky } 224212904Sdim 225203954Srdivacky sc->mbuf_pages[sc->mbuf_npages] = pg; 226193323Sed 227193323Sed if (group == 0) { 228193323Sed struct mbuf0_chunk *c; 229193323Sed 230201360Srdivacky pg->hdr.pool = 0; 231193323Sed pg->hdr.nchunks = MBUF0_PER_PAGE; 232193323Sed pg->hdr.chunksize = MBUF0_CHUNK; 233193323Sed pg->hdr.hdroff = sizeof(c->storage); 234201360Srdivacky c = (struct mbuf0_chunk *)pg; 235200581Srdivacky for (i = 0; i < MBUF0_PER_PAGE; i++, c++) { 236193323Sed c->hdr.pageno = sc->mbuf_npages; 237193323Sed c->hdr.chunkno = i; 238207618Srdivacky c->hdr.flags = 0; 239207618Srdivacky hatm_ext_free(&sc->mbuf_list[0], 240207618Srdivacky (struct mbufx_free *)c); 241207618Srdivacky } 242207618Srdivacky } else { 243207618Srdivacky struct mbuf1_chunk *c; 244207618Srdivacky 245207618Srdivacky pg->hdr.pool = 1; 246207618Srdivacky pg->hdr.nchunks = 
MBUF1_PER_PAGE; 247207618Srdivacky pg->hdr.chunksize = MBUF1_CHUNK; 248207618Srdivacky pg->hdr.hdroff = sizeof(c->storage); 249207618Srdivacky c = (struct mbuf1_chunk *)pg; 250207618Srdivacky for (i = 0; i < MBUF1_PER_PAGE; i++, c++) { 251207618Srdivacky c->hdr.pageno = sc->mbuf_npages; 252207618Srdivacky c->hdr.chunkno = i; 253207618Srdivacky c->hdr.flags = 0; 254207618Srdivacky hatm_ext_free(&sc->mbuf_list[1], 255207618Srdivacky (struct mbufx_free *)c); 256207618Srdivacky } 257207618Srdivacky } 258207618Srdivacky sc->mbuf_npages++; 259207618Srdivacky} 260207618Srdivacky 261207618Srdivacky/* 262207618Srdivacky * Free an mbuf and put it onto the free list. 263207618Srdivacky */ 264207618Srdivackystatic void 265207618Srdivackyhatm_mbuf0_free(struct mbuf *m, void *buf, void *args) 266207618Srdivacky{ 267207618Srdivacky struct hatm_softc *sc = args; 268207618Srdivacky struct mbuf0_chunk *c = buf; 269207618Srdivacky 270207618Srdivacky KASSERT((c->hdr.flags & (MBUF_USED | MBUF_CARD)) == MBUF_USED, 271207618Srdivacky ("freeing unused mbuf %x", c->hdr.flags)); 272207618Srdivacky c->hdr.flags &= ~MBUF_USED; 273207618Srdivacky hatm_ext_free(&sc->mbuf_list[0], (struct mbufx_free *)c); 274207618Srdivacky} 275207618Srdivackystatic void 276207618Srdivackyhatm_mbuf1_free(struct mbuf *m, void *buf, void *args) 277207618Srdivacky{ 278207618Srdivacky struct hatm_softc *sc = args; 279207618Srdivacky struct mbuf1_chunk *c = buf; 280207618Srdivacky 281193323Sed KASSERT((c->hdr.flags & (MBUF_USED | MBUF_CARD)) == MBUF_USED, 282193323Sed ("freeing unused mbuf %x", c->hdr.flags)); 283203954Srdivacky c->hdr.flags &= ~MBUF_USED; 284193323Sed hatm_ext_free(&sc->mbuf_list[1], (struct mbufx_free *)c); 285193323Sed} 286 287static void 288hatm_mbuf_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 289{ 290 uint32_t *ptr = (uint32_t *)arg; 291 292 if (nsegs == 0) { 293 printf("%s: error=%d\n", __func__, error); 294 return; 295 } 296 KASSERT(nsegs == 1, ("too many segments for 
mbuf: %d", nsegs)); 297 KASSERT(segs[0].ds_addr <= 0xffffffffLU, 298 ("phys addr too large %lx", (u_long)segs[0].ds_addr)); 299 300 *ptr = segs[0].ds_addr; 301} 302 303/* 304 * Receive buffer pool interrupt. This means the number of entries in the 305 * queue has dropped below the threshold. Try to supply new buffers. 306 */ 307static void 308he_intr_rbp(struct hatm_softc *sc, struct herbp *rbp, u_int large, 309 u_int group) 310{ 311 u_int ntail; 312 struct mbuf *m; 313 int error; 314 struct mbufx_free *cf; 315 struct mbuf_page *pg; 316 struct mbuf0_chunk *buf0; 317 struct mbuf1_chunk *buf1; 318 319 DBG(sc, INTR, ("%s buffer supply threshold crossed for group %u", 320 large ? "large" : "small", group)); 321 322 rbp->head = (READ4(sc, HE_REGO_RBP_S(large, group)) >> HE_REGS_RBP_HEAD) 323 & (rbp->size - 1); 324 325 for (;;) { 326 if ((ntail = rbp->tail + 1) == rbp->size) 327 ntail = 0; 328 if (ntail == rbp->head) 329 break; 330 m = NULL; 331 332 if (large) { 333 /* allocate the MBUF */ 334 if ((m = m_getcl(M_NOWAIT, MT_DATA, 335 M_PKTHDR)) == NULL) { 336 if_printf(sc->ifp, 337 "no mbuf clusters\n"); 338 break; 339 } 340 m->m_data += MBUFL_OFFSET; 341 342 if (sc->lbufs[sc->lbufs_next] != NULL) 343 panic("hatm: lbufs full %u", sc->lbufs_next); 344 sc->lbufs[sc->lbufs_next] = m; 345 346 if ((error = bus_dmamap_load(sc->mbuf_tag, 347 sc->rmaps[sc->lbufs_next], 348 m->m_data, rbp->bsize, hatm_mbuf_helper, 349 &rbp->rbp[rbp->tail].phys, BUS_DMA_NOWAIT)) != 0) 350 panic("hatm: mbuf mapping failed %d", error); 351 352 bus_dmamap_sync(sc->mbuf_tag, 353 sc->rmaps[sc->lbufs_next], 354 BUS_DMASYNC_PREREAD); 355 356 rbp->rbp[rbp->tail].handle = 357 MBUF_MAKE_LHANDLE(sc->lbufs_next); 358 359 if (++sc->lbufs_next == sc->lbufs_size) 360 sc->lbufs_next = 0; 361 362 } else if (group == 0) { 363 /* 364 * Allocate small buffer in group 0 365 */ 366 if ((cf = hatm_ext_alloc(sc, 0)) == NULL) 367 break; 368 buf0 = (struct mbuf0_chunk *)cf; 369 pg = sc->mbuf_pages[buf0->hdr.pageno]; 370 
buf0->hdr.flags |= MBUF_CARD; 371 rbp->rbp[rbp->tail].phys = pg->hdr.phys + 372 buf0->hdr.chunkno * MBUF0_CHUNK + MBUF0_OFFSET; 373 rbp->rbp[rbp->tail].handle = 374 MBUF_MAKE_HANDLE(buf0->hdr.pageno, 375 buf0->hdr.chunkno); 376 377 bus_dmamap_sync(sc->mbuf_tag, pg->hdr.map, 378 BUS_DMASYNC_PREREAD); 379 380 } else if (group == 1) { 381 /* 382 * Allocate small buffer in group 1 383 */ 384 if ((cf = hatm_ext_alloc(sc, 1)) == NULL) 385 break; 386 buf1 = (struct mbuf1_chunk *)cf; 387 pg = sc->mbuf_pages[buf1->hdr.pageno]; 388 buf1->hdr.flags |= MBUF_CARD; 389 rbp->rbp[rbp->tail].phys = pg->hdr.phys + 390 buf1->hdr.chunkno * MBUF1_CHUNK + MBUF1_OFFSET; 391 rbp->rbp[rbp->tail].handle = 392 MBUF_MAKE_HANDLE(buf1->hdr.pageno, 393 buf1->hdr.chunkno); 394 395 bus_dmamap_sync(sc->mbuf_tag, pg->hdr.map, 396 BUS_DMASYNC_PREREAD); 397 398 } else 399 /* ups */ 400 break; 401 402 DBG(sc, DMA, ("MBUF loaded: handle=%x m=%p phys=%x", 403 rbp->rbp[rbp->tail].handle, m, rbp->rbp[rbp->tail].phys)); 404 405 rbp->tail = ntail; 406 } 407 WRITE4(sc, HE_REGO_RBP_T(large, group), 408 (rbp->tail << HE_REGS_RBP_TAIL)); 409} 410 411/* 412 * Extract the buffer and hand it to the receive routine 413 */ 414static struct mbuf * 415hatm_rx_buffer(struct hatm_softc *sc, u_int group, u_int handle) 416{ 417 u_int pageno; 418 u_int chunkno; 419 struct mbuf *m; 420 421 if (handle & MBUF_LARGE_FLAG) { 422 /* large buffer - sync and unload */ 423 MBUF_PARSE_LHANDLE(handle, handle); 424 DBG(sc, RX, ("RX large handle=%x", handle)); 425 426 bus_dmamap_sync(sc->mbuf_tag, sc->rmaps[handle], 427 BUS_DMASYNC_POSTREAD); 428 bus_dmamap_unload(sc->mbuf_tag, sc->rmaps[handle]); 429 430 m = sc->lbufs[handle]; 431 sc->lbufs[handle] = NULL; 432 433 return (m); 434 } 435 436 MBUF_PARSE_HANDLE(handle, pageno, chunkno); 437 438 DBG(sc, RX, ("RX group=%u handle=%x page=%u chunk=%u", group, handle, 439 pageno, chunkno)); 440 441 MGETHDR(m, M_NOWAIT, MT_DATA); 442 443 if (group == 0) { 444 struct mbuf0_chunk *c0; 445 446 c0 = 
(struct mbuf0_chunk *)sc->mbuf_pages[pageno] + chunkno; 447 KASSERT(c0->hdr.pageno == pageno, ("pageno = %u/%u", 448 c0->hdr.pageno, pageno)); 449 KASSERT(c0->hdr.chunkno == chunkno, ("chunkno = %u/%u", 450 c0->hdr.chunkno, chunkno)); 451 KASSERT(c0->hdr.flags & MBUF_CARD, ("mbuf not on card %u/%u", 452 pageno, chunkno)); 453 KASSERT(!(c0->hdr.flags & MBUF_USED), ("used mbuf %u/%u", 454 pageno, chunkno)); 455 456 c0->hdr.flags |= MBUF_USED; 457 c0->hdr.flags &= ~MBUF_CARD; 458 459 if (m != NULL) { 460 m->m_ext.ext_cnt = &c0->hdr.ref_cnt; 461 MEXTADD(m, (void *)c0, MBUF0_SIZE, 462 hatm_mbuf0_free, c0, sc, M_PKTHDR, EXT_EXTREF); 463 m->m_data += MBUF0_OFFSET; 464 } else 465 (void)hatm_mbuf0_free(NULL, c0, sc); 466 467 } else { 468 struct mbuf1_chunk *c1; 469 470 c1 = (struct mbuf1_chunk *)sc->mbuf_pages[pageno] + chunkno; 471 KASSERT(c1->hdr.pageno == pageno, ("pageno = %u/%u", 472 c1->hdr.pageno, pageno)); 473 KASSERT(c1->hdr.chunkno == chunkno, ("chunkno = %u/%u", 474 c1->hdr.chunkno, chunkno)); 475 KASSERT(c1->hdr.flags & MBUF_CARD, ("mbuf not on card %u/%u", 476 pageno, chunkno)); 477 KASSERT(!(c1->hdr.flags & MBUF_USED), ("used mbuf %u/%u", 478 pageno, chunkno)); 479 480 c1->hdr.flags |= MBUF_USED; 481 c1->hdr.flags &= ~MBUF_CARD; 482 483 if (m != NULL) { 484 m->m_ext.ext_cnt = &c1->hdr.ref_cnt; 485 MEXTADD(m, (void *)c1, MBUF1_SIZE, 486 hatm_mbuf1_free, c1, sc, M_PKTHDR, EXT_EXTREF); 487 m->m_data += MBUF1_OFFSET; 488 } else 489 (void)hatm_mbuf1_free(NULL, c1, sc); 490 } 491 492 return (m); 493} 494 495/* 496 * Interrupt because of receive buffer returned. 
497 */ 498static void 499he_intr_rbrq(struct hatm_softc *sc, struct herbrq *rq, u_int group) 500{ 501 struct he_rbrqen *e; 502 uint32_t flags, tail; 503 u_int cid, len; 504 struct mbuf *m; 505 506 for (;;) { 507 tail = sc->hsp->group[group].rbrq_tail >> 3; 508 509 if (rq->head == tail) 510 break; 511 512 e = &rq->rbrq[rq->head]; 513 514 flags = e->addr & HE_REGM_RBRQ_FLAGS; 515 if (!(flags & HE_REGM_RBRQ_HBUF_ERROR)) 516 m = hatm_rx_buffer(sc, group, e->addr); 517 else 518 m = NULL; 519 520 cid = (e->len & HE_REGM_RBRQ_CID) >> HE_REGS_RBRQ_CID; 521 len = 4 * (e->len & HE_REGM_RBRQ_LEN); 522 523 hatm_rx(sc, cid, flags, m, len); 524 525 if (++rq->head == rq->size) 526 rq->head = 0; 527 } 528 WRITE4(sc, HE_REGO_RBRQ_H(group), rq->head << 3); 529} 530 531void 532hatm_intr(void *p) 533{ 534 struct heirq *q = p; 535 struct hatm_softc *sc = q->sc; 536 u_int status; 537 u_int tail; 538 539 /* if we have a stray interrupt with a non-initialized card, 540 * we cannot even lock before looking at the flag */ 541 if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING)) 542 return; 543 544 mtx_lock(&sc->mtx); 545 (void)READ4(sc, HE_REGO_INT_FIFO); 546 547 tail = *q->tailp; 548 if (q->head == tail) { 549 /* workaround for tail pointer not updated bug (8.1.1) */ 550 DBG(sc, INTR, ("hatm: intr tailq not updated bug triggered")); 551 552 /* read the tail pointer from the card */ 553 tail = READ4(sc, HE_REGO_IRQ_BASE(q->group)) & 554 HE_REGM_IRQ_BASE_TAIL; 555 BARRIER_R(sc); 556 557 sc->istats.bug_no_irq_upd++; 558 } 559 560 /* clear the interrupt */ 561 WRITE4(sc, HE_REGO_INT_FIFO, HE_REGM_INT_FIFO_CLRA); 562 BARRIER_W(sc); 563 564 while (q->head != tail) { 565 status = q->irq[q->head]; 566 q->irq[q->head] = HE_REGM_ITYPE_INVALID; 567 if (++q->head == (q->size - 1)) 568 q->head = 0; 569 570 switch (status & HE_REGM_ITYPE) { 571 572 case HE_REGM_ITYPE_TBRQ: 573 DBG(sc, INTR, ("TBRQ treshold %u", status & HE_REGM_IGROUP)); 574 sc->istats.itype_tbrq++; 575 he_intr_tbrq(sc, &sc->tbrq, status & 
HE_REGM_IGROUP); 576 break; 577 578 case HE_REGM_ITYPE_TPD: 579 DBG(sc, INTR, ("TPD ready %u", status & HE_REGM_IGROUP)); 580 sc->istats.itype_tpd++; 581 he_intr_tbrq(sc, &sc->tbrq, status & HE_REGM_IGROUP); 582 break; 583 584 case HE_REGM_ITYPE_RBPS: 585 sc->istats.itype_rbps++; 586 switch (status & HE_REGM_IGROUP) { 587 588 case 0: 589 he_intr_rbp(sc, &sc->rbp_s0, 0, 0); 590 break; 591 592 case 1: 593 he_intr_rbp(sc, &sc->rbp_s1, 0, 1); 594 break; 595 596 default: 597 if_printf(sc->ifp, "bad INTR RBPS%u\n", 598 status & HE_REGM_IGROUP); 599 break; 600 } 601 break; 602 603 case HE_REGM_ITYPE_RBPL: 604 sc->istats.itype_rbpl++; 605 switch (status & HE_REGM_IGROUP) { 606 607 case 0: 608 he_intr_rbp(sc, &sc->rbp_l0, 1, 0); 609 break; 610 611 default: 612 if_printf(sc->ifp, "bad INTR RBPL%u\n", 613 status & HE_REGM_IGROUP); 614 break; 615 } 616 break; 617 618 case HE_REGM_ITYPE_RBRQ: 619 DBG(sc, INTR, ("INTERRUPT RBRQ %u", status & HE_REGM_IGROUP)); 620 sc->istats.itype_rbrq++; 621 switch (status & HE_REGM_IGROUP) { 622 623 case 0: 624 he_intr_rbrq(sc, &sc->rbrq_0, 0); 625 break; 626 627 case 1: 628 if (sc->rbrq_1.size > 0) { 629 he_intr_rbrq(sc, &sc->rbrq_1, 1); 630 break; 631 } 632 /* FALLTHRU */ 633 634 default: 635 if_printf(sc->ifp, "bad INTR RBRQ%u\n", 636 status & HE_REGM_IGROUP); 637 break; 638 } 639 break; 640 641 case HE_REGM_ITYPE_RBRQT: 642 DBG(sc, INTR, ("INTERRUPT RBRQT %u", status & HE_REGM_IGROUP)); 643 sc->istats.itype_rbrqt++; 644 switch (status & HE_REGM_IGROUP) { 645 646 case 0: 647 he_intr_rbrq(sc, &sc->rbrq_0, 0); 648 break; 649 650 case 1: 651 if (sc->rbrq_1.size > 0) { 652 he_intr_rbrq(sc, &sc->rbrq_1, 1); 653 break; 654 } 655 /* FALLTHRU */ 656 657 default: 658 if_printf(sc->ifp, "bad INTR RBRQT%u\n", 659 status & HE_REGM_IGROUP); 660 break; 661 } 662 break; 663 664 case HE_REGM_ITYPE_PHYS: 665 sc->istats.itype_phys++; 666 utopia_intr(&sc->utopia); 667 break; 668 669#if HE_REGM_ITYPE_UNKNOWN != HE_REGM_ITYPE_INVALID 670 case 
HE_REGM_ITYPE_UNKNOWN: 671 sc->istats.itype_unknown++; 672 if_printf(sc->ifp, "bad interrupt\n"); 673 break; 674#endif 675 676 case HE_REGM_ITYPE_ERR: 677 sc->istats.itype_err++; 678 switch (status) { 679 680 case HE_REGM_ITYPE_PERR: 681 if_printf(sc->ifp, "parity error\n"); 682 break; 683 684 case HE_REGM_ITYPE_ABORT: 685 if_printf(sc->ifp, "abort interrupt " 686 "addr=0x%08x\n", 687 READ4(sc, HE_REGO_ABORT_ADDR)); 688 break; 689 690 default: 691 if_printf(sc->ifp, 692 "bad interrupt type %08x\n", status); 693 break; 694 } 695 break; 696 697 case HE_REGM_ITYPE_INVALID: 698 /* this is the documented fix for the ISW bug 8.1.1 699 * Note, that the documented fix is partly wrong: 700 * the ISWs should be intialized to 0xf8 not 0xff */ 701 sc->istats.bug_bad_isw++; 702 DBG(sc, INTR, ("hatm: invalid ISW bug triggered")); 703 he_intr_tbrq(sc, &sc->tbrq, 0); 704 he_intr_rbp(sc, &sc->rbp_s0, 0, 0); 705 he_intr_rbp(sc, &sc->rbp_l0, 1, 0); 706 he_intr_rbp(sc, &sc->rbp_s1, 0, 1); 707 he_intr_rbrq(sc, &sc->rbrq_0, 0); 708 he_intr_rbrq(sc, &sc->rbrq_1, 1); 709 utopia_intr(&sc->utopia); 710 break; 711 712 default: 713 if_printf(sc->ifp, "bad interrupt type %08x\n", 714 status); 715 break; 716 } 717 } 718 719 /* write back head to clear queue */ 720 WRITE4(sc, HE_REGO_IRQ_HEAD(0), 721 ((q->size - 1) << HE_REGS_IRQ_HEAD_SIZE) | 722 (q->thresh << HE_REGS_IRQ_HEAD_THRESH) | 723 (q->head << HE_REGS_IRQ_HEAD_HEAD)); 724 BARRIER_W(sc); 725 726 /* workaround the back-to-back irq access problem (8.1.2) */ 727 (void)READ4(sc, HE_REGO_INT_FIFO); 728 BARRIER_R(sc); 729 730 mtx_unlock(&sc->mtx); 731} 732