if_hatm_intr.c revision 175872
/*-
 * Copyright (c) 2001-2003
 *	Fraunhofer Institute for Open Communication Systems (FhG Fokus).
 * 	All rights reserved.
 * Author: Hartmut Brandt <harti@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
28119420Sobrien
29119420Sobrien#include <sys/cdefs.h>
30119420Sobrien__FBSDID("$FreeBSD: head/sys/dev/hatm/if_hatm_intr.c 175872 2008-02-01 19:36:27Z phk $");
3148104Syokota
3248104Syokota/*
3348104Syokota * ForeHE driver.
3448104Syokota *
3548104Syokota * Interrupt handler.
3666834Sphk */
3766834Sphk
3848104Syokota#include "opt_inet.h"
3948104Syokota#include "opt_natm.h"
4065176Sdfr
4148104Syokota#include <sys/types.h>
4248104Syokota#include <sys/param.h>
4348104Syokota#include <sys/systm.h>
4448104Syokota#include <sys/malloc.h>
4548104Syokota#include <sys/kernel.h>
4648104Syokota#include <sys/bus.h>
4748104Syokota#include <sys/errno.h>
4848104Syokota#include <sys/conf.h>
4948104Syokota#include <sys/module.h>
5048104Syokota#include <sys/queue.h>
5148104Syokota#include <sys/syslog.h>
5248104Syokota#include <sys/condvar.h>
5348104Syokota#include <sys/sysctl.h>
5448104Syokota#include <vm/uma.h>
5548104Syokota
56102241Sarchie#include <sys/sockio.h>
5748104Syokota#include <sys/mbuf.h>
5848104Syokota#include <sys/socket.h>
5948104Syokota
6048104Syokota#include <net/if.h>
6148104Syokota#include <net/if_media.h>
6248104Syokota#include <net/if_atm.h>
6348104Syokota#include <net/route.h>
6448104Syokota#include <netinet/in.h>
6548104Syokota#include <netinet/if_atm.h>
66111119Simp
67102241Sarchie#include <machine/bus.h>
6848104Syokota#include <machine/resource.h>
6948104Syokota#include <sys/bus.h>
7078956Syokota#include <sys/rman.h>
7178956Syokota#include <dev/pci/pcireg.h>
7248104Syokota#include <dev/pci/pcivar.h>
7348104Syokota
7448104Syokota#include <dev/utopia/utopia.h>
7548104Syokota#include <dev/hatm/if_hatmconf.h>
7648104Syokota#include <dev/hatm/if_hatmreg.h>
77146476Smarius#include <dev/hatm/if_hatmvar.h>
7848104Syokota
/*
 * Compile-time checks that the external-mbuf chunk/page layout constants
 * from if_hatmvar.h actually match the structure sizes, and that chunk
 * numbers fit into the 8-bit field used in the buffer handles.
 */
CTASSERT(sizeof(struct mbuf_page) == MBUF_ALLOC_SIZE);
CTASSERT(sizeof(struct mbuf0_chunk) == MBUF0_CHUNK);
CTASSERT(sizeof(struct mbuf1_chunk) == MBUF1_CHUNK);
CTASSERT(sizeof(((struct mbuf0_chunk *)NULL)->storage) >= MBUF0_SIZE);
CTASSERT(sizeof(((struct mbuf1_chunk *)NULL)->storage) >= MBUF1_SIZE);
CTASSERT(sizeof(struct tpd) <= HE_TPD_SIZE);

/* chunk numbers are stored in 8 bits of the handle */
CTASSERT(MBUF0_PER_PAGE <= 256);
CTASSERT(MBUF1_PER_PAGE <= 256);

/* forward declaration; defined below, used by hatm_ext_alloc() */
static void hatm_mbuf_page_alloc(struct hatm_softc *sc, u_int group);
9048104Syokota
9148104Syokota/*
9248104Syokota * Free an external mbuf to a list. We use atomic functions so that
9348104Syokota * we don't need a mutex for the list.
9448104Syokota *
9548104Syokota * Note that in general this algorithm is not safe when multiple readers
9648104Syokota * and writers are present. To cite from a mail from David Schultz
9748104Syokota * <das@freebsd.org>:
9848104Syokota *
99102241Sarchie *	It looks like this is subject to the ABA problem.  For instance,
10048104Syokota *	suppose X, Y, and Z are the top things on the freelist and a
10148104Syokota *	thread attempts to make an allocation.  You set buf to X and load
10248104Syokota *	buf->link (Y) into a register.  Then the thread get preempted, and
103102241Sarchie *	another thread allocates both X and Y, then frees X.  When the
10448104Syokota *	original thread gets the CPU again, X is still on top of the
10548104Syokota *	freelist, so the atomic operation succeeds.  However, the atomic
10648104Syokota *	op places Y on top of the freelist, even though Y is no longer
10748104Syokota *	free.
10848104Syokota *
10978956Syokota * We are, however sure that we have only one thread that ever allocates
11048104Syokota * buffers because the only place we're call from is the interrupt handler.
11148104Syokota * Under these circumstances the code looks safe.
11248104Syokota */
11348104Syokotavoid
11448104Syokotahatm_ext_free(struct mbufx_free **list, struct mbufx_free *buf)
11548104Syokota{
11648104Syokota	for (;;) {
11748104Syokota		buf->link = *list;
11848104Syokota		if (atomic_cmpset_ptr((uintptr_t *)list, (uintptr_t)buf->link,
11948104Syokota		    (uintptr_t)buf))
12048104Syokota			break;
12148104Syokota	}
122146476Smarius}
12348104Syokota
12448104Syokotastatic __inline struct mbufx_free *
12548104Syokotahatm_ext_alloc(struct hatm_softc *sc, u_int g)
126146476Smarius{
12748104Syokota	struct mbufx_free *buf;
12848104Syokota
12948104Syokota	for (;;) {
13048104Syokota		if ((buf = sc->mbuf_list[g]) == NULL)
13148104Syokota			break;
13248104Syokota		if (atomic_cmpset_ptr((uintptr_t *)&sc->mbuf_list[g],
133146476Smarius			(uintptr_t)buf, (uintptr_t)buf->link))
13448104Syokota			break;
13548104Syokota	}
13648104Syokota	if (buf == NULL) {
137146476Smarius		hatm_mbuf_page_alloc(sc, g);
13848104Syokota		for (;;) {
13948104Syokota			if ((buf = sc->mbuf_list[g]) == NULL)
14048104Syokota				break;
14148104Syokota			if (atomic_cmpset_ptr((uintptr_t *)&sc->mbuf_list[g],
14248104Syokota			    (uintptr_t)buf, (uintptr_t)buf->link))
14348104Syokota				break;
144146476Smarius		}
14548104Syokota	}
14648104Syokota	return (buf);
14748104Syokota}
148146476Smarius
14948104Syokota/*
15048104Syokota * Either the queue treshold was crossed or a TPD with the INTR bit set
15148104Syokota * was transmitted.
15248104Syokota */
15348104Syokotastatic void
15448104Syokotahe_intr_tbrq(struct hatm_softc *sc, struct hetbrq *q, u_int group)
155146476Smarius{
15648104Syokota	uint32_t *tailp = &sc->hsp->group[group].tbrq_tail;
15748104Syokota	u_int no;
15848104Syokota
159146476Smarius	while (q->head != (*tailp >> 2)) {
16048104Syokota		no = (q->tbrq[q->head].addr & HE_REGM_TBRQ_ADDR) >>
16148104Syokota		    HE_REGS_TPD_ADDR;
16248104Syokota		hatm_tx_complete(sc, TPD_ADDR(sc, no),
16348104Syokota		    (q->tbrq[q->head].addr & HE_REGM_TBRQ_FLAGS));
16448104Syokota
16548104Syokota		if (++q->head == q->size)
16648104Syokota			q->head = 0;
16748104Syokota	}
16848104Syokota	WRITE4(sc, HE_REGO_TBRQ_H(group), q->head << 2);
16948104Syokota}
17048104Syokota
17148104Syokota/*
17248104Syokota * DMA loader function for external mbuf page.
17348104Syokota */
17448104Syokotastatic void
17548104Syokotahatm_extbuf_helper(void *arg, bus_dma_segment_t *segs, int nsegs,
17648104Syokota    int error)
17748104Syokota{
17848104Syokota	if (error) {
179146476Smarius		printf("%s: mapping error %d\n", __func__, error);
18048104Syokota		return;
18148104Syokota	}
18248104Syokota	KASSERT(nsegs == 1,
183146476Smarius	    ("too many segments for DMA: %d", nsegs));
18448104Syokota	KASSERT(segs[0].ds_addr <= 0xffffffffLU,
18548104Syokota	    ("phys addr too large %lx", (u_long)segs[0].ds_addr));
18648104Syokota
18748104Syokota	*(uint32_t *)arg = segs[0].ds_addr;
18848104Syokota}
18948104Syokota
190146476Smarius/*
19148104Syokota * Allocate a page of external mbuf storage for the small pools.
192146476Smarius * Create a DMA map and load it. Put all the chunks onto the right
19348104Syokota * free list.
19448104Syokota */
static void
hatm_mbuf_page_alloc(struct hatm_softc *sc, u_int group)
{
	struct mbuf_page *pg;
	int err;
	u_int i;

	/* obey the configured upper limit on external mbuf pages */
	if (sc->mbuf_npages == sc->mbuf_max_pages)
		return;
	/* M_NOWAIT: we may be called from the interrupt handler */
	if ((pg = malloc(MBUF_ALLOC_SIZE, M_DEVBUF, M_NOWAIT)) == NULL)
		return;

	err = bus_dmamap_create(sc->mbuf_tag, 0, &pg->hdr.map);
	if (err != 0) {
		if_printf(sc->ifp, "%s -- bus_dmamap_create: %d\n",
		    __func__, err);
		free(pg, M_DEVBUF);
		return;
	}
	/* load the map; hatm_extbuf_helper() stores the 32-bit physical
	 * address of the page into pg->hdr.phys */
	err = bus_dmamap_load(sc->mbuf_tag, pg->hdr.map, pg, MBUF_ALLOC_SIZE,
	    hatm_extbuf_helper, &pg->hdr.phys, BUS_DMA_NOWAIT);
	if (err != 0) {
		if_printf(sc->ifp, "%s -- mbuf mapping failed %d\n",
		    __func__, err);
		bus_dmamap_destroy(sc->mbuf_tag, pg->hdr.map);
		free(pg, M_DEVBUF);
		return;
	}

	sc->mbuf_pages[sc->mbuf_npages] = pg;

	if (group == 0) {
		struct mbuf0_chunk *c;

		/* carve the page into group-0 chunks and put each onto
		 * the group-0 free list */
		pg->hdr.pool = 0;
		pg->hdr.nchunks = MBUF0_PER_PAGE;
		pg->hdr.chunksize = MBUF0_CHUNK;
		pg->hdr.hdroff = sizeof(c->storage);
		c = (struct mbuf0_chunk *)pg;
		for (i = 0; i < MBUF0_PER_PAGE; i++, c++) {
			c->hdr.pageno = sc->mbuf_npages;
			c->hdr.chunkno = i;
			c->hdr.flags = 0;
			hatm_ext_free(&sc->mbuf_list[0],
			    (struct mbufx_free *)c);
		}
	} else {
		struct mbuf1_chunk *c;

		/* same as above, but with the group-1 chunk geometry */
		pg->hdr.pool = 1;
		pg->hdr.nchunks = MBUF1_PER_PAGE;
		pg->hdr.chunksize = MBUF1_CHUNK;
		pg->hdr.hdroff = sizeof(c->storage);
		c = (struct mbuf1_chunk *)pg;
		for (i = 0; i < MBUF1_PER_PAGE; i++, c++) {
			c->hdr.pageno = sc->mbuf_npages;
			c->hdr.chunkno = i;
			c->hdr.flags = 0;
			hatm_ext_free(&sc->mbuf_list[1],
			    (struct mbufx_free *)c);
		}
	}
	sc->mbuf_npages++;
}
25948189Syokota
26048189Syokota/*
261146476Smarius * Free an mbuf and put it onto the free list.
262146476Smarius */
26348189Syokotastatic void
26448189Syokotahatm_mbuf0_free(void *buf, void *args)
265146476Smarius{
266146476Smarius	struct hatm_softc *sc = args;
26748189Syokota	struct mbuf0_chunk *c = buf;
26848189Syokota
26948189Syokota	KASSERT((c->hdr.flags & (MBUF_USED | MBUF_CARD)) == MBUF_USED,
27048189Syokota	    ("freeing unused mbuf %x", c->hdr.flags));
27148189Syokota	c->hdr.flags &= ~MBUF_USED;
27248104Syokota	hatm_ext_free(&sc->mbuf_list[0], (struct mbufx_free *)c);
27348104Syokota}
27448104Syokotastatic void
27548104Syokotahatm_mbuf1_free(void *buf, void *args)
27648104Syokota{
27748104Syokota	struct hatm_softc *sc = args;
27848104Syokota	struct mbuf1_chunk *c = buf;
27948104Syokota
280146476Smarius	KASSERT((c->hdr.flags & (MBUF_USED | MBUF_CARD)) == MBUF_USED,
281146476Smarius	    ("freeing unused mbuf %x", c->hdr.flags));
28248104Syokota	c->hdr.flags &= ~MBUF_USED;
28348104Syokota	hatm_ext_free(&sc->mbuf_list[1], (struct mbufx_free *)c);
28448104Syokota}
285146476Smarius
286146476Smariusstatic void
28748104Syokotahatm_mbuf_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
28848104Syokota{
28948104Syokota	uint32_t *ptr = (uint32_t *)arg;
29048104Syokota
291146476Smarius	if (nsegs == 0) {
29248104Syokota		printf("%s: error=%d\n", __func__, error);
29348104Syokota		return;
29448104Syokota	}
29548104Syokota	KASSERT(nsegs == 1, ("too many segments for mbuf: %d", nsegs));
296146476Smarius	KASSERT(segs[0].ds_addr <= 0xffffffffLU,
29748104Syokota	    ("phys addr too large %lx", (u_long)segs[0].ds_addr));
29848104Syokota
29948104Syokota	*ptr = segs[0].ds_addr;
30048104Syokota}
30148104Syokota
30248104Syokota/*
30348104Syokota * Receive buffer pool interrupt. This means the number of entries in the
304146476Smarius * queue has dropped below the threshold. Try to supply new buffers.
30548104Syokota */
static void
he_intr_rbp(struct hatm_softc *sc, struct herbp *rbp, u_int large,
    u_int group)
{
	u_int ntail;
	struct mbuf *m;
	int error;
	struct mbufx_free *cf;
	struct mbuf_page *pg;
	struct mbuf0_chunk *buf0;
	struct mbuf1_chunk *buf1;

	DBG(sc, INTR, ("%s buffer supply threshold crossed for group %u",
	   large ? "large" : "small", group));

	/* re-read the ring head from the card; the ring size is a power
	 * of two, so masking with (size - 1) wraps the index */
	rbp->head = (READ4(sc, HE_REGO_RBP_S(large, group)) >> HE_REGS_RBP_HEAD)
	    & (rbp->size - 1);

	/* refill until the ring is full (tail + 1 == head) or we run out
	 * of buffers */
	for (;;) {
		if ((ntail = rbp->tail + 1) == rbp->size)
			ntail = 0;
		if (ntail == rbp->head)
			break;
		m = NULL;

		if (large) {
			/* allocate the MBUF */
			if ((m = m_getcl(M_DONTWAIT, MT_DATA,
			    M_PKTHDR)) == NULL) {
				if_printf(sc->ifp,
				    "no mbuf clusters\n");
				break;
			}
			m->m_data += MBUFL_OFFSET;

			/* remember the mbuf under its slot index so that
			 * hatm_rx_buffer() can recover it from the handle */
			if (sc->lbufs[sc->lbufs_next] != NULL)
				panic("hatm: lbufs full %u", sc->lbufs_next);
			sc->lbufs[sc->lbufs_next] = m;

			if ((error = bus_dmamap_load(sc->mbuf_tag,
			    sc->rmaps[sc->lbufs_next],
			    m->m_data, rbp->bsize, hatm_mbuf_helper,
			    &rbp->rbp[rbp->tail].phys, BUS_DMA_NOWAIT)) != 0)
				panic("hatm: mbuf mapping failed %d", error);

			bus_dmamap_sync(sc->mbuf_tag,
			    sc->rmaps[sc->lbufs_next],
			    BUS_DMASYNC_PREREAD);

			rbp->rbp[rbp->tail].handle =
			    MBUF_MAKE_LHANDLE(sc->lbufs_next);

			if (++sc->lbufs_next == sc->lbufs_size)
				sc->lbufs_next = 0;

		} else if (group == 0) {
			/*
			 * Allocate small buffer in group 0
			 */
			if ((cf = hatm_ext_alloc(sc, 0)) == NULL)
				break;
			buf0 = (struct mbuf0_chunk *)cf;
			pg = sc->mbuf_pages[buf0->hdr.pageno];
			buf0->hdr.flags |= MBUF_CARD;
			rbp->rbp[rbp->tail].phys = pg->hdr.phys +
			    buf0->hdr.chunkno * MBUF0_CHUNK + MBUF0_OFFSET;
			rbp->rbp[rbp->tail].handle =
			    MBUF_MAKE_HANDLE(buf0->hdr.pageno,
			    buf0->hdr.chunkno);

			bus_dmamap_sync(sc->mbuf_tag, pg->hdr.map,
			    BUS_DMASYNC_PREREAD);

		} else if (group == 1) {
			/*
			 * Allocate small buffer in group 1
			 */
			if ((cf = hatm_ext_alloc(sc, 1)) == NULL)
				break;
			buf1 = (struct mbuf1_chunk *)cf;
			pg = sc->mbuf_pages[buf1->hdr.pageno];
			buf1->hdr.flags |= MBUF_CARD;
			rbp->rbp[rbp->tail].phys = pg->hdr.phys +
			    buf1->hdr.chunkno * MBUF1_CHUNK + MBUF1_OFFSET;
			rbp->rbp[rbp->tail].handle =
			    MBUF_MAKE_HANDLE(buf1->hdr.pageno,
			    buf1->hdr.chunkno);

			bus_dmamap_sync(sc->mbuf_tag, pg->hdr.map,
			    BUS_DMASYNC_PREREAD);

		} else
			/* ups */
			break;

		DBG(sc, DMA, ("MBUF loaded: handle=%x m=%p phys=%x",
		    rbp->rbp[rbp->tail].handle, m, rbp->rbp[rbp->tail].phys));

		rbp->tail = ntail;
	}
	/* publish the new tail to the card */
	WRITE4(sc, HE_REGO_RBP_T(large, group),
	    (rbp->tail << HE_REGS_RBP_TAIL));
}
409
410/*
411 * Extract the buffer and hand it to the receive routine
412 */
static struct mbuf *
hatm_rx_buffer(struct hatm_softc *sc, u_int group, u_int handle)
{
	u_int pageno;
	u_int chunkno;
	struct mbuf *m;

	if (handle & MBUF_LARGE_FLAG) {
		/* large buffer - sync and unload */
		MBUF_PARSE_LHANDLE(handle, handle);
		DBG(sc, RX, ("RX large handle=%x", handle));

		bus_dmamap_sync(sc->mbuf_tag, sc->rmaps[handle],
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mbuf_tag, sc->rmaps[handle]);

		/* hand back the mbuf stashed in he_intr_rbp() and clear
		 * the slot so it can be reused */
		m = sc->lbufs[handle];
		sc->lbufs[handle] = NULL;

		return (m);
	}

	/* small buffer - decode page and chunk number from the handle */
	MBUF_PARSE_HANDLE(handle, pageno, chunkno);

	DBG(sc, RX, ("RX group=%u handle=%x page=%u chunk=%u", group, handle,
	    pageno, chunkno));

	/* may fail; the NULL case is handled below by freeing the chunk */
	MGETHDR(m, M_DONTWAIT, MT_DATA);

	if (group == 0) {
		struct mbuf0_chunk *c0;

		c0 = (struct mbuf0_chunk *)sc->mbuf_pages[pageno] + chunkno;
		KASSERT(c0->hdr.pageno == pageno, ("pageno = %u/%u",
		    c0->hdr.pageno, pageno));
		KASSERT(c0->hdr.chunkno == chunkno, ("chunkno = %u/%u",
		    c0->hdr.chunkno, chunkno));
		KASSERT(c0->hdr.flags & MBUF_CARD, ("mbuf not on card %u/%u",
		    pageno, chunkno));
		KASSERT(!(c0->hdr.flags & MBUF_USED), ("used mbuf %u/%u",
		    pageno, chunkno));

		/* ownership moves from the card to the stack */
		c0->hdr.flags |= MBUF_USED;
		c0->hdr.flags &= ~MBUF_CARD;

		if (m != NULL) {
			/* attach the chunk as external storage; the free
			 * routine returns it to the group-0 free list */
			m->m_ext.ref_cnt = &c0->hdr.ref_cnt;
			MEXTADD(m, (void *)c0, MBUF0_SIZE,
			    hatm_mbuf0_free, c0, sc, M_PKTHDR, EXT_EXTREF);
			m->m_data += MBUF0_OFFSET;
		} else
			hatm_mbuf0_free(c0, sc);

	} else {
		struct mbuf1_chunk *c1;

		c1 = (struct mbuf1_chunk *)sc->mbuf_pages[pageno] + chunkno;
		KASSERT(c1->hdr.pageno == pageno, ("pageno = %u/%u",
		    c1->hdr.pageno, pageno));
		KASSERT(c1->hdr.chunkno == chunkno, ("chunkno = %u/%u",
		    c1->hdr.chunkno, chunkno));
		KASSERT(c1->hdr.flags & MBUF_CARD, ("mbuf not on card %u/%u",
		    pageno, chunkno));
		KASSERT(!(c1->hdr.flags & MBUF_USED), ("used mbuf %u/%u",
		    pageno, chunkno));

		/* ownership moves from the card to the stack */
		c1->hdr.flags |= MBUF_USED;
		c1->hdr.flags &= ~MBUF_CARD;

		if (m != NULL) {
			m->m_ext.ref_cnt = &c1->hdr.ref_cnt;
			MEXTADD(m, (void *)c1, MBUF1_SIZE,
			    hatm_mbuf1_free, c1, sc, M_PKTHDR, EXT_EXTREF);
			m->m_data += MBUF1_OFFSET;
		} else
			hatm_mbuf1_free(c1, sc);
	}

	return (m);
}
493
494/*
495 * Interrupt because of receive buffer returned.
496 */
497static void
498he_intr_rbrq(struct hatm_softc *sc, struct herbrq *rq, u_int group)
499{
500	struct he_rbrqen *e;
501	uint32_t flags, tail;
502	u_int cid, len;
503	struct mbuf *m;
504
505	for (;;) {
506		tail = sc->hsp->group[group].rbrq_tail >> 3;
507
508		if (rq->head == tail)
509			break;
510
511		e = &rq->rbrq[rq->head];
512
513		flags = e->addr & HE_REGM_RBRQ_FLAGS;
514		if (!(flags & HE_REGM_RBRQ_HBUF_ERROR))
515			m = hatm_rx_buffer(sc, group, e->addr);
516		else
517			m = NULL;
518
519		cid = (e->len & HE_REGM_RBRQ_CID) >> HE_REGS_RBRQ_CID;
520		len = 4 * (e->len & HE_REGM_RBRQ_LEN);
521
522		hatm_rx(sc, cid, flags, m, len);
523
524		if (++rq->head == rq->size)
525			rq->head = 0;
526	}
527	WRITE4(sc, HE_REGO_RBRQ_H(group), rq->head << 3);
528}
529
/*
 * Main interrupt handler. Drains the interrupt status queue and dispatches
 * each entry to the appropriate sub-handler. Contains workarounds for
 * several documented hardware bugs (8.1.1 tail-pointer/ISW, 8.1.2
 * back-to-back register access).
 */
void
hatm_intr(void *p)
{
	struct heirq *q = p;
	struct hatm_softc *sc = q->sc;
	u_int status;
	u_int tail;

	/* if we have a stray interrupt with a non-initialized card,
	 * we cannot even lock before looking at the flag */
	if (!(sc->ifp->if_drv_flags & IFF_DRV_RUNNING))
		return;

	mtx_lock(&sc->mtx);
	(void)READ4(sc, HE_REGO_INT_FIFO);

	tail = *q->tailp;
	if (q->head == tail) {
		/* workaround for tail pointer not updated bug (8.1.1) */
		DBG(sc, INTR, ("hatm: intr tailq not updated bug triggered"));

		/* read the tail pointer from the card */
		tail = READ4(sc, HE_REGO_IRQ_BASE(q->group)) &
		    HE_REGM_IRQ_BASE_TAIL;
		BARRIER_R(sc);

		sc->istats.bug_no_irq_upd++;
	}

	/* clear the interrupt */
	WRITE4(sc, HE_REGO_INT_FIFO, HE_REGM_INT_FIFO_CLRA);
	BARRIER_W(sc);

	while (q->head != tail) {
		status = q->irq[q->head];
		/* poison the consumed slot so the 8.1.1 ISW bug can be
		 * detected (see HE_REGM_ITYPE_INVALID case below) */
		q->irq[q->head] = HE_REGM_ITYPE_INVALID;
		if (++q->head == (q->size - 1))
			q->head = 0;

		switch (status & HE_REGM_ITYPE) {

		  case HE_REGM_ITYPE_TBRQ:
			DBG(sc, INTR, ("TBRQ treshold %u", status & HE_REGM_IGROUP));
			sc->istats.itype_tbrq++;
			he_intr_tbrq(sc, &sc->tbrq, status & HE_REGM_IGROUP);
			break;

		  case HE_REGM_ITYPE_TPD:
			DBG(sc, INTR, ("TPD ready %u", status & HE_REGM_IGROUP));
			sc->istats.itype_tpd++;
			he_intr_tbrq(sc, &sc->tbrq, status & HE_REGM_IGROUP);
			break;

		  case HE_REGM_ITYPE_RBPS:
			/* small receive buffer pool below threshold */
			sc->istats.itype_rbps++;
			switch (status & HE_REGM_IGROUP) {

			  case 0:
				he_intr_rbp(sc, &sc->rbp_s0, 0, 0);
				break;

			  case 1:
				he_intr_rbp(sc, &sc->rbp_s1, 0, 1);
				break;

			  default:
				if_printf(sc->ifp, "bad INTR RBPS%u\n",
				    status & HE_REGM_IGROUP);
				break;
			}
			break;

		  case HE_REGM_ITYPE_RBPL:
			/* large receive buffer pool below threshold;
			 * only group 0 has a large pool */
			sc->istats.itype_rbpl++;
			switch (status & HE_REGM_IGROUP) {

			  case 0:
				he_intr_rbp(sc, &sc->rbp_l0, 1, 0);
				break;

			  default:
				if_printf(sc->ifp, "bad INTR RBPL%u\n",
				    status & HE_REGM_IGROUP);
				break;
			}
			break;

		  case HE_REGM_ITYPE_RBRQ:
			DBG(sc, INTR, ("INTERRUPT RBRQ %u", status & HE_REGM_IGROUP));
			sc->istats.itype_rbrq++;
			switch (status & HE_REGM_IGROUP) {

			  case 0:
				he_intr_rbrq(sc, &sc->rbrq_0, 0);
				break;

			  case 1:
				if (sc->rbrq_1.size > 0) {
					he_intr_rbrq(sc, &sc->rbrq_1, 1);
					break;
				}
				/* FALLTHRU */

			  default:
				if_printf(sc->ifp, "bad INTR RBRQ%u\n",
				    status & HE_REGM_IGROUP);
				break;
			}
			break;

		  case HE_REGM_ITYPE_RBRQT:
			/* receive buffer return queue timeout */
			DBG(sc, INTR, ("INTERRUPT RBRQT %u", status & HE_REGM_IGROUP));
			sc->istats.itype_rbrqt++;
			switch (status & HE_REGM_IGROUP) {

			  case 0:
				he_intr_rbrq(sc, &sc->rbrq_0, 0);
				break;

			  case 1:
				if (sc->rbrq_1.size > 0) {
					he_intr_rbrq(sc, &sc->rbrq_1, 1);
					break;
				}
				/* FALLTHRU */

			  default:
				if_printf(sc->ifp, "bad INTR RBRQT%u\n",
				    status & HE_REGM_IGROUP);
				break;
			}
			break;

		  case HE_REGM_ITYPE_PHYS:
			/* PHY interrupt - handled by the utopia layer */
			sc->istats.itype_phys++;
			utopia_intr(&sc->utopia);
			break;

#if HE_REGM_ITYPE_UNKNOWN != HE_REGM_ITYPE_INVALID
		  case HE_REGM_ITYPE_UNKNOWN:
			sc->istats.itype_unknown++;
			if_printf(sc->ifp, "bad interrupt\n");
			break;
#endif

		  case HE_REGM_ITYPE_ERR:
			sc->istats.itype_err++;
			switch (status) {

			  case HE_REGM_ITYPE_PERR:
				if_printf(sc->ifp, "parity error\n");
				break;

			  case HE_REGM_ITYPE_ABORT:
				if_printf(sc->ifp, "abort interrupt "
				    "addr=0x%08x\n",
				    READ4(sc, HE_REGO_ABORT_ADDR));
				break;

			  default:
				if_printf(sc->ifp,
				    "bad interrupt type %08x\n", status);
				break;
			}
			break;

		  case HE_REGM_ITYPE_INVALID:
			/* this is the documented fix for the ISW bug 8.1.1
			 * Note, that the documented fix is partly wrong:
			 * the ISWs should be intialized to 0xf8 not 0xff */
			sc->istats.bug_bad_isw++;
			DBG(sc, INTR, ("hatm: invalid ISW bug triggered"));
			/* the real cause is unknown, so poll everything */
			he_intr_tbrq(sc, &sc->tbrq, 0);
			he_intr_rbp(sc, &sc->rbp_s0, 0, 0);
			he_intr_rbp(sc, &sc->rbp_l0, 1, 0);
			he_intr_rbp(sc, &sc->rbp_s1, 0, 1);
			he_intr_rbrq(sc, &sc->rbrq_0, 0);
			he_intr_rbrq(sc, &sc->rbrq_1, 1);
			utopia_intr(&sc->utopia);
			break;

		  default:
			if_printf(sc->ifp, "bad interrupt type %08x\n",
			    status);
			break;
		}
	}

	/* write back head to clear queue */
	WRITE4(sc, HE_REGO_IRQ_HEAD(0),
	    ((q->size - 1) << HE_REGS_IRQ_HEAD_SIZE) |
	    (q->thresh << HE_REGS_IRQ_HEAD_THRESH) |
	    (q->head << HE_REGS_IRQ_HEAD_HEAD));
	BARRIER_W(sc);

	/* workaround the back-to-back irq access problem (8.1.2) */
	(void)READ4(sc, HE_REGO_INT_FIFO);
	BARRIER_R(sc);

	mtx_unlock(&sc->mtx);
}
731