/*
 * Copyright (c) 2001-2003
 *	Fraunhofer Institute for Open Communication Systems (FhG Fokus).
 *	All rights reserved.
 * Author: Hartmut Brandt <harti@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/hatm/if_hatm_intr.c 121744 2003-10-30 16:19:50Z harti $");

/*
 * ForeHE driver.
 *
 * Interrupt handler.
 */

#include "opt_inet.h"
#include "opt_natm.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/errno.h>
#include <sys/conf.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>
#include <vm/uma.h>

#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/if_atm.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/if_atm.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/utopia/utopia.h>
#include <dev/hatm/if_hatmconf.h>
#include <dev/hatm/if_hatmreg.h>
#include <dev/hatm/if_hatmvar.h>

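/*
 * Compile-time checks for the layout of the external mbuf pool
 * structures.
 */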
CTASSERT(sizeof(struct mbuf_page) == MBUF_ALLOC_SIZE);
CTASSERT(sizeof(struct mbuf0_chunk) == MBUF0_CHUNK);
CTASSERT(sizeof(struct mbuf1_chunk) == MBUF1_CHUNK);
CTASSERT(sizeof(((struct mbuf0_chunk *)NULL)->storage) >= MBUF0_SIZE);
CTASSERT(sizeof(((struct mbuf1_chunk *)NULL)->storage) >= MBUF1_SIZE);
CTASSERT(sizeof(struct tpd) <= HE_TPD_SIZE);

CTASSERT(MBUF0_PER_PAGE <= 256);
CTASSERT(MBUF1_PER_PAGE <= 256);

static void hatm_mbuf_page_alloc(struct hatm_softc *sc, u_int group);

/*
 * Free an external mbuf to a list. We use atomic functions so that
 * we don't need a mutex for the list.
 *
 * Note that in general this algorithm is not safe when multiple readers
 * and writers are present. To cite from a mail from David Schultz
 * <das@freebsd.org>:
 *
 *	It looks like this is subject to the ABA problem. For instance,
 *	suppose X, Y, and Z are the top things on the freelist and a
 *	thread attempts to make an allocation. You set buf to X and load
 *	buf->link (Y) into a register. Then the thread gets preempted, and
 *	another thread allocates both X and Y, then frees X. When the
 *	original thread gets the CPU again, X is still on top of the
 *	freelist, so the atomic operation succeeds. However, the atomic
 *	op places Y on top of the freelist, even though Y is no longer
 *	free.
 *
 * We are, however, sure that we have only one thread that ever allocates
 * buffers, because the only place we're called from is the interrupt handler.
 * Under these circumstances the code looks safe.
 */
__inline void
hatm_ext_free(struct mbufx_free **list, struct mbufx_free *buf)
{
	for (;;) {
		buf->link = *list;
		if (atomic_cmpset_ptr(list, buf->link, buf))
			break;
	}
}

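/*
 * Allocate a buffer from the free list of group 'g'. If the list is
 * empty, try to map a new page of chunks and retry once. Returns NULL
 * when no buffer can be obtained. Like hatm_ext_free() above, this
 * relies on there being only a single allocating thread.
 */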
static __inline struct mbufx_free *
hatm_ext_alloc(struct hatm_softc *sc, u_int g)
{
	struct mbufx_free *buf;

	for (;;) {
		if ((buf = sc->mbuf_list[g]) == NULL)
			break;
		if (atomic_cmpset_ptr(&sc->mbuf_list[g], buf, buf->link))
			break;
	}
	if (buf == NULL) {
		hatm_mbuf_page_alloc(sc, g);
		for (;;) {
			if ((buf = sc->mbuf_list[g]) == NULL)
				break;
			if (atomic_cmpset_ptr(&sc->mbuf_list[g], buf, buf->link))
				break;
		}
	}
	return (buf);
}

/*
 * Either the queue threshold was crossed or a TPD with the INTR bit set
 * was transmitted.
 */
static void
he_intr_tbrq(struct hatm_softc *sc, struct hetbrq *q, u_int group)
{
	uint32_t *tailp = &sc->hsp->group[group].tbrq_tail;
	u_int no;

	while (q->head != (*tailp >> 2)) {
		no = (q->tbrq[q->head].addr & HE_REGM_TBRQ_ADDR) >>
		    HE_REGS_TPD_ADDR;
		hatm_tx_complete(sc, TPD_ADDR(sc, no),
		    (q->tbrq[q->head].addr & HE_REGM_TBRQ_FLAGS));

		if (++q->head == q->size)
			q->head = 0;
	}
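	/* tell the card the new head position */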
	WRITE4(sc, HE_REGO_TBRQ_H(group), q->head << 2);
}

/*
 * DMA loader function for external mbuf page.
 */
static void
hatm_extbuf_helper(void *arg, bus_dma_segment_t *segs, int nsegs,
    int error)
{
	if (error) {
		printf("%s: mapping error %d\n", __func__, error);
		return;
	}
	KASSERT(nsegs == 1,
	    ("too many segments for DMA: %d", nsegs));
	KASSERT(segs[0].ds_addr <= 0xffffffffLU,
	    ("phys addr too large %lx", (u_long)segs[0].ds_addr));

	*(uint32_t *)arg = segs[0].ds_addr;
}

/*
 * Allocate a page of external mbuf storage for the small pools.
 * Create a DMA map and load it. Put all the chunks onto the right
 * free list.
 */
static void
hatm_mbuf_page_alloc(struct hatm_softc *sc, u_int group)
{
	struct mbuf_page *pg;
	int err;
	u_int i;

	if (sc->mbuf_npages == sc->mbuf_max_pages)
		return;
	if ((pg = malloc(MBUF_ALLOC_SIZE, M_DEVBUF, M_NOWAIT)) == NULL)
		return;

	err = bus_dmamap_create(sc->mbuf_tag, 0, &pg->hdr.map);
	if (err != 0) {
		if_printf(&sc->ifatm.ifnet, "%s -- bus_dmamap_create: %d\n",
		    __func__, err);
		free(pg, M_DEVBUF);
		return;
	}
	err = bus_dmamap_load(sc->mbuf_tag, pg->hdr.map, pg, MBUF_ALLOC_SIZE,
	    hatm_extbuf_helper, &pg->hdr.phys, BUS_DMA_NOWAIT);
	if (err != 0) {
		if_printf(&sc->ifatm.ifnet, "%s -- mbuf mapping failed %d\n",
		    __func__, err);
		bus_dmamap_destroy(sc->mbuf_tag, pg->hdr.map);
		free(pg, M_DEVBUF);
		return;
	}

	sc->mbuf_pages[sc->mbuf_npages] = pg;

	if (group == 0) {
		struct mbuf0_chunk *c;

		pg->hdr.pool = 0;
		pg->hdr.nchunks = MBUF0_PER_PAGE;
		pg->hdr.chunksize = MBUF0_CHUNK;
		pg->hdr.hdroff = sizeof(c->storage);
		c = (struct mbuf0_chunk *)pg;
		for (i = 0; i < MBUF0_PER_PAGE; i++, c++) {
			c->hdr.pageno = sc->mbuf_npages;
			c->hdr.chunkno = i;
			c->hdr.flags = 0;	/* fresh chunk: neither used nor on the card */
			hatm_ext_free(&sc->mbuf_list[0],
			    (struct mbufx_free *)c);
		}
	} else {
		struct mbuf1_chunk *c;

		pg->hdr.pool = 1;
		pg->hdr.nchunks = MBUF1_PER_PAGE;
		pg->hdr.chunksize = MBUF1_CHUNK;
		pg->hdr.hdroff = sizeof(c->storage);
		c = (struct mbuf1_chunk *)pg;
		for (i = 0; i < MBUF1_PER_PAGE; i++, c++) {
			c->hdr.pageno = sc->mbuf_npages;
			c->hdr.chunkno = i;
			c->hdr.flags = 0;	/* fresh chunk: neither used nor on the card */
			hatm_ext_free(&sc->mbuf_list[1],
			    (struct mbufx_free *)c);
		}
	}
	sc->mbuf_npages++;
}

/*
 * Free an mbuf and put it onto the free list.
 */
static void
hatm_mbuf0_free(void *buf, void *args)
{
	struct hatm_softc *sc = args;
	struct mbuf0_chunk *c = buf;

	KASSERT((c->hdr.flags & (MBUF_USED | MBUF_CARD)) == MBUF_USED,
	    ("freeing unused mbuf %x", c->hdr.flags));
	c->hdr.flags &= ~MBUF_USED;
	hatm_ext_free(&sc->mbuf_list[0], (struct mbufx_free *)c);
}

static void
hatm_mbuf1_free(void *buf, void *args)
{
	struct hatm_softc *sc = args;
	struct mbuf1_chunk *c = buf;

	KASSERT((c->hdr.flags & (MBUF_USED | MBUF_CARD)) == MBUF_USED,
	    ("freeing unused mbuf %x", c->hdr.flags));
	c->hdr.flags &= ~MBUF_USED;
	hatm_ext_free(&sc->mbuf_list[1], (struct mbufx_free *)c);
}

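/*
 * DMA loader function for the maps of the large receive buffers. Stores
 * the bus address of the single DMA segment where the caller asked for it.
 */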
static void
hatm_mbuf_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	uint32_t *ptr = (uint32_t *)arg;

	if (nsegs == 0) {
		printf("%s: error=%d\n", __func__, error);
		return;
	}
	KASSERT(nsegs == 1, ("too many segments for mbuf: %d", nsegs));
	KASSERT(segs[0].ds_addr <= 0xffffffffLU,
	    ("phys addr too large %lx", (u_long)segs[0].ds_addr));

	*ptr = segs[0].ds_addr;
}

/*
 * Receive buffer pool interrupt. This means the number of entries in the
 * queue has dropped below the threshold. Try to supply new buffers.
 */
static void
he_intr_rbp(struct hatm_softc *sc, struct herbp *rbp, u_int large,
    u_int group)
{
	u_int ntail;
	struct mbuf *m;
	int error;
	struct mbufx_free *cf;
	struct mbuf_page *pg;
	struct mbuf0_chunk *buf0;
	struct mbuf1_chunk *buf1;

	DBG(sc, INTR, ("%s buffer supply threshold crossed for group %u",
	    large ? "large" : "small", group));

	rbp->head = (READ4(sc, HE_REGO_RBP_S(large, group)) >> HE_REGS_RBP_HEAD)
	    & (rbp->size - 1);

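	/*
	 * Fill the queue. One slot is always kept empty so that
	 * head == tail means an empty, not a full, queue.
	 */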
	for (;;) {
		if ((ntail = rbp->tail + 1) == rbp->size)
			ntail = 0;
		if (ntail == rbp->head)
			break;
		m = NULL;

		if (large) {
			/* allocate the MBUF */
			if ((m = m_getcl(M_DONTWAIT, MT_DATA,
			    M_PKTHDR)) == NULL) {
				if_printf(&sc->ifatm.ifnet,
				    "no mbuf clusters\n");
				break;
			}
			m->m_data += MBUFL_OFFSET;

			if (sc->lbufs[sc->lbufs_next] != NULL)
				panic("hatm: lbufs full %u", sc->lbufs_next);
			sc->lbufs[sc->lbufs_next] = m;

			if ((error = bus_dmamap_load(sc->mbuf_tag,
			    sc->rmaps[sc->lbufs_next],
			    m->m_data, rbp->bsize, hatm_mbuf_helper,
			    &rbp->rbp[rbp->tail].phys, BUS_DMA_NOWAIT)) != 0)
				panic("hatm: mbuf mapping failed %d", error);

			bus_dmamap_sync(sc->mbuf_tag,
			    sc->rmaps[sc->lbufs_next],
			    BUS_DMASYNC_PREREAD);

			rbp->rbp[rbp->tail].handle =
			    MBUF_MAKE_LHANDLE(sc->lbufs_next);

			if (++sc->lbufs_next == sc->lbufs_size)
				sc->lbufs_next = 0;

		} else if (group == 0) {
			/*
			 * Allocate small buffer in group 0
			 */
			if ((cf = hatm_ext_alloc(sc, 0)) == NULL)
				break;
			buf0 = (struct mbuf0_chunk *)cf;
			pg = sc->mbuf_pages[buf0->hdr.pageno];
			buf0->hdr.flags |= MBUF_CARD;
			rbp->rbp[rbp->tail].phys = pg->hdr.phys +
			    buf0->hdr.chunkno * MBUF0_CHUNK + MBUF0_OFFSET;
			rbp->rbp[rbp->tail].handle =
			    MBUF_MAKE_HANDLE(buf0->hdr.pageno,
			    buf0->hdr.chunkno);

			bus_dmamap_sync(sc->mbuf_tag, pg->hdr.map,
			    BUS_DMASYNC_PREREAD);

		} else if (group == 1) {
			/*
			 * Allocate small buffer in group 1
			 */
			if ((cf = hatm_ext_alloc(sc, 1)) == NULL)
				break;
			buf1 = (struct mbuf1_chunk *)cf;
			pg = sc->mbuf_pages[buf1->hdr.pageno];
			buf1->hdr.flags |= MBUF_CARD;
			rbp->rbp[rbp->tail].phys = pg->hdr.phys +
			    buf1->hdr.chunkno * MBUF1_CHUNK + MBUF1_OFFSET;
			rbp->rbp[rbp->tail].handle =
			    MBUF_MAKE_HANDLE(buf1->hdr.pageno,
			    buf1->hdr.chunkno);

			bus_dmamap_sync(sc->mbuf_tag, pg->hdr.map,
			    BUS_DMASYNC_PREREAD);

		} else
			/* oops */
			break;

		DBG(sc, DMA, ("MBUF loaded: handle=%x m=%p phys=%x",
		    rbp->rbp[rbp->tail].handle, m, rbp->rbp[rbp->tail].phys));

		rbp->tail = ntail;
	}
	WRITE4(sc, HE_REGO_RBP_T(large, group),
	    (rbp->tail << HE_REGS_RBP_TAIL));
}

/*
 * Extract the buffer and hand it to the receive routine
 */
static struct mbuf *
hatm_rx_buffer(struct hatm_softc *sc, u_int group, u_int handle)
{
	u_int pageno;
	u_int chunkno;
	struct mbuf *m;

	if (handle & MBUF_LARGE_FLAG) {
		/* large buffer - sync and unload */
		MBUF_PARSE_LHANDLE(handle, handle);
		DBG(sc, RX, ("RX large handle=%x", handle));

		bus_dmamap_sync(sc->mbuf_tag, sc->rmaps[handle],
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mbuf_tag, sc->rmaps[handle]);

		m = sc->lbufs[handle];
		sc->lbufs[handle] = NULL;

		return (m);
	}

	MBUF_PARSE_HANDLE(handle, pageno, chunkno);

	DBG(sc, RX, ("RX group=%u handle=%x page=%u chunk=%u", group, handle,
	    pageno, chunkno));

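	/* may fail - in that case the chunk is freed back to the pool below */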
	MGETHDR(m, M_DONTWAIT, MT_DATA);

	if (group == 0) {
		struct mbuf0_chunk *c0;

		c0 = (struct mbuf0_chunk *)sc->mbuf_pages[pageno] + chunkno;
		KASSERT(c0->hdr.pageno == pageno, ("pageno = %u/%u",
		    c0->hdr.pageno, pageno));
		KASSERT(c0->hdr.chunkno == chunkno, ("chunkno = %u/%u",
		    c0->hdr.chunkno, chunkno));
		KASSERT(c0->hdr.flags & MBUF_CARD, ("mbuf not on card %u/%u",
		    pageno, chunkno));
		KASSERT(!(c0->hdr.flags & MBUF_USED), ("used mbuf %u/%u",
		    pageno, chunkno));

		c0->hdr.flags |= MBUF_USED;
		c0->hdr.flags &= ~MBUF_CARD;

		if (m != NULL) {
			m->m_ext.ref_cnt = &c0->hdr.ref_cnt;
			m_extadd(m, (void *)c0, MBUF0_SIZE,
			    hatm_mbuf0_free, sc, M_PKTHDR, EXT_EXTREF);
			m->m_data += MBUF0_OFFSET;
		} else
			hatm_mbuf0_free(c0, sc);

	} else {
		struct mbuf1_chunk *c1;

		c1 = (struct mbuf1_chunk *)sc->mbuf_pages[pageno] + chunkno;
		KASSERT(c1->hdr.pageno == pageno, ("pageno = %u/%u",
		    c1->hdr.pageno, pageno));
		KASSERT(c1->hdr.chunkno == chunkno, ("chunkno = %u/%u",
		    c1->hdr.chunkno, chunkno));
		KASSERT(c1->hdr.flags & MBUF_CARD, ("mbuf not on card %u/%u",
		    pageno, chunkno));
		KASSERT(!(c1->hdr.flags & MBUF_USED), ("used mbuf %u/%u",
		    pageno, chunkno));

		c1->hdr.flags |= MBUF_USED;
		c1->hdr.flags &= ~MBUF_CARD;

		if (m != NULL) {
			m->m_ext.ref_cnt = &c1->hdr.ref_cnt;
			m_extadd(m, (void *)c1, MBUF1_SIZE,
			    hatm_mbuf1_free, sc, M_PKTHDR, EXT_EXTREF);
			m->m_data += MBUF1_OFFSET;
		} else
			hatm_mbuf1_free(c1, sc);
	}

	return (m);
}

/*
 * Interrupt because of receive buffer returned.
 */
static void
he_intr_rbrq(struct hatm_softc *sc, struct herbrq *rq, u_int group)
{
	struct he_rbrqen *e;
	uint32_t flags, tail;
	u_int cid, len;
	struct mbuf *m;

	for (;;) {
		tail = sc->hsp->group[group].rbrq_tail >> 3;

		if (rq->head == tail)
			break;

		e = &rq->rbrq[rq->head];

		flags = e->addr & HE_REGM_RBRQ_FLAGS;
		if (!(flags & HE_REGM_RBRQ_HBUF_ERROR))
			m = hatm_rx_buffer(sc, group, e->addr);
		else
			m = NULL;

		cid = (e->len & HE_REGM_RBRQ_CID) >> HE_REGS_RBRQ_CID;
		len = 4 * (e->len & HE_REGM_RBRQ_LEN);

		hatm_rx(sc, cid, flags, m, len);

		if (++rq->head == rq->size)
			rq->head = 0;
	}
	WRITE4(sc, HE_REGO_RBRQ_H(group), rq->head << 3);
}

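/*
 * Interrupt handler. Drain the interrupt queue, dispatching on the
 * interrupt type, and handle the documented hardware bugs: the stale
 * tail pointer and invalid ISWs (8.1.1) and the back-to-back register
 * access problem (8.1.2).
 */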
void
hatm_intr(void *p)
{
	struct heirq *q = p;
	struct hatm_softc *sc = q->sc;
	u_int status;
	u_int tail;

	/* if we have a stray interrupt with a non-initialized card,
	 * we cannot even lock before looking at the flag */
	if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING))
		return;

	mtx_lock(&sc->mtx);
	(void)READ4(sc, HE_REGO_INT_FIFO);

	tail = *q->tailp;
	if (q->head == tail) {
		/* workaround for tail pointer not updated bug (8.1.1) */
		DBG(sc, INTR, ("hatm: intr tailq not updated bug triggered"));

		/* read the tail pointer from the card */
		tail = READ4(sc, HE_REGO_IRQ_BASE(q->group)) &
		    HE_REGM_IRQ_BASE_TAIL;
		BARRIER_R(sc);

		sc->istats.bug_no_irq_upd++;
	}

	/* clear the interrupt */
	WRITE4(sc, HE_REGO_INT_FIFO, HE_REGM_INT_FIFO_CLRA);
	BARRIER_W(sc);

	while (q->head != tail) {
		status = q->irq[q->head];
		q->irq[q->head] = HE_REGM_ITYPE_INVALID;
		if (++q->head == (q->size - 1))
			q->head = 0;

		switch (status & HE_REGM_ITYPE) {

		case HE_REGM_ITYPE_TBRQ:
			DBG(sc, INTR, ("TBRQ threshold %u", status & HE_REGM_IGROUP));
			sc->istats.itype_tbrq++;
			he_intr_tbrq(sc, &sc->tbrq, status & HE_REGM_IGROUP);
			break;

		case HE_REGM_ITYPE_TPD:
			DBG(sc, INTR, ("TPD ready %u", status & HE_REGM_IGROUP));
			sc->istats.itype_tpd++;
			he_intr_tbrq(sc, &sc->tbrq, status & HE_REGM_IGROUP);
			break;

		case HE_REGM_ITYPE_RBPS:
			sc->istats.itype_rbps++;
			switch (status & HE_REGM_IGROUP) {

			case 0:
				he_intr_rbp(sc, &sc->rbp_s0, 0, 0);
				break;

			case 1:
				he_intr_rbp(sc, &sc->rbp_s1, 0, 1);
				break;

			default:
				if_printf(&sc->ifatm.ifnet, "bad INTR RBPS%u\n",
				    status & HE_REGM_IGROUP);
				break;
			}
			break;

		case HE_REGM_ITYPE_RBPL:
			sc->istats.itype_rbpl++;
			switch (status & HE_REGM_IGROUP) {

			case 0:
				he_intr_rbp(sc, &sc->rbp_l0, 1, 0);
				break;

			default:
				if_printf(&sc->ifatm.ifnet, "bad INTR RBPL%u\n",
				    status & HE_REGM_IGROUP);
				break;
			}
			break;

		case HE_REGM_ITYPE_RBRQ:
			DBG(sc, INTR, ("INTERRUPT RBRQ %u", status & HE_REGM_IGROUP));
			sc->istats.itype_rbrq++;
			switch (status & HE_REGM_IGROUP) {

			case 0:
				he_intr_rbrq(sc, &sc->rbrq_0, 0);
				break;

			case 1:
				if (sc->rbrq_1.size > 0) {
					he_intr_rbrq(sc, &sc->rbrq_1, 1);
					break;
				}
				/* FALLTHRU */

			default:
				if_printf(&sc->ifatm.ifnet, "bad INTR RBRQ%u\n",
				    status & HE_REGM_IGROUP);
				break;
			}
			break;

		case HE_REGM_ITYPE_RBRQT:
			DBG(sc, INTR, ("INTERRUPT RBRQT %u", status & HE_REGM_IGROUP));
			sc->istats.itype_rbrqt++;
			switch (status & HE_REGM_IGROUP) {

			case 0:
				he_intr_rbrq(sc, &sc->rbrq_0, 0);
				break;

			case 1:
				if (sc->rbrq_1.size > 0) {
					he_intr_rbrq(sc, &sc->rbrq_1, 1);
					break;
				}
				/* FALLTHRU */

			default:
				if_printf(&sc->ifatm.ifnet, "bad INTR RBRQT%u\n",
				    status & HE_REGM_IGROUP);
				break;
			}
			break;

		case HE_REGM_ITYPE_PHYS:
			sc->istats.itype_phys++;
			utopia_intr(&sc->utopia);
			break;

#if HE_REGM_ITYPE_UNKNOWN != HE_REGM_ITYPE_INVALID
		case HE_REGM_ITYPE_UNKNOWN:
			sc->istats.itype_unknown++;
			if_printf(&sc->ifatm.ifnet, "bad interrupt\n");
			break;
#endif

		case HE_REGM_ITYPE_ERR:
			sc->istats.itype_err++;
			switch (status) {

			case HE_REGM_ITYPE_PERR:
				if_printf(&sc->ifatm.ifnet, "parity error\n");
				break;

			case HE_REGM_ITYPE_ABORT:
				if_printf(&sc->ifatm.ifnet, "abort interrupt "
				    "addr=0x%08x\n",
				    READ4(sc, HE_REGO_ABORT_ADDR));
				break;

			default:
				if_printf(&sc->ifatm.ifnet,
				    "bad interrupt type %08x\n", status);
				break;
			}
			break;

		case HE_REGM_ITYPE_INVALID:
			/* this is the documented fix for the ISW bug 8.1.1.
			 * Note that the documented fix is partly wrong:
			 * the ISWs should be initialized to 0xf8, not 0xff */
			sc->istats.bug_bad_isw++;
			DBG(sc, INTR, ("hatm: invalid ISW bug triggered"));
			he_intr_tbrq(sc, &sc->tbrq, 0);
			he_intr_rbp(sc, &sc->rbp_s0, 0, 0);
			he_intr_rbp(sc, &sc->rbp_l0, 1, 0);
			he_intr_rbp(sc, &sc->rbp_s1, 0, 1);
			he_intr_rbrq(sc, &sc->rbrq_0, 0);
			he_intr_rbrq(sc, &sc->rbrq_1, 1);
			utopia_intr(&sc->utopia);
			break;

		default:
			if_printf(&sc->ifatm.ifnet, "bad interrupt type %08x\n",
			    status);
			break;
		}
	}

	/* write back head to clear queue */
	WRITE4(sc, HE_REGO_IRQ_HEAD(0),
	    ((q->size - 1) << HE_REGS_IRQ_HEAD_SIZE) |
	    (q->thresh << HE_REGS_IRQ_HEAD_THRESH) |
	    (q->head << HE_REGS_IRQ_HEAD_HEAD));
	BARRIER_W(sc);

	/* workaround the back-to-back irq access problem (8.1.2) */
	(void)READ4(sc, HE_REGO_INT_FIFO);
	BARRIER_R(sc);

	mtx_unlock(&sc->mtx);
}
727}