/*	$NetBSD: pdq_ifsubr.c,v 1.38 2001/12/21 23:21:47 matt Exp $	*/

/*-
 * Copyright (c) 1995, 1996 Matt Thomas <matt@3am-software.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pdq_ifsubr.c,v 1.12 1997/06/05 01:56:35 thomas Exp$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/pdq/pdq_ifsubr.c 148887 2005-08-09 10:20:02Z rwatson $");

/*
 * DEC PDQ FDDI Controller; code for BSD derived operating systems
 *
 *	This module provides bus-independent, BSD-specific O/S functions
 *	(i.e. it provides an ifnet interface to the rest of the system).
 */


#define PDQ_OSSUPPORT

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <sys/module.h>
#include <sys/bus.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/fddi.h>

#include <net/bpf.h>

#include <dev/pdq/pdq_freebsd.h>
#include <dev/pdq/pdqreg.h>

devclass_t pdq_devclass;

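/*
 * pdq_ifinit:
 *	(Re)program the PDQ to match the interface flags.  If IFF_UP is
 *	set, mirror IFF_PROMISC into PDQ_PROMISC and IFF_LINK1 into
 *	PDQ_PASS_SMT (apparently used to pass SMT frames up to the host),
 *	mark the chip PDQ_RUNNING and call pdq_run(); otherwise clear
 *	IFF_DRV_RUNNING and stop the chip.  (PDQ_IFNET() and the PDQ_*
 *	flag bits come from the driver's headers.)
 */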
static void
pdq_ifinit(
    pdq_softc_t *sc)
{
    if (PDQ_IFNET(sc)->if_flags & IFF_UP) {
        PDQ_IFNET(sc)->if_drv_flags |= IFF_DRV_RUNNING;
        if (PDQ_IFNET(sc)->if_flags & IFF_PROMISC) {
            sc->sc_pdq->pdq_flags |= PDQ_PROMISC;
        } else {
            sc->sc_pdq->pdq_flags &= ~PDQ_PROMISC;
        }
        if (PDQ_IFNET(sc)->if_flags & IFF_LINK1) {
            sc->sc_pdq->pdq_flags |= PDQ_PASS_SMT;
        } else {
            sc->sc_pdq->pdq_flags &= ~PDQ_PASS_SMT;
        }
        sc->sc_pdq->pdq_flags |= PDQ_RUNNING;
        pdq_run(sc->sc_pdq);
    } else {
        PDQ_IFNET(sc)->if_drv_flags &= ~IFF_DRV_RUNNING;
        sc->sc_pdq->pdq_flags &= ~PDQ_RUNNING;
        pdq_stop(sc->sc_pdq);
    }
}

static void
pdq_ifwatchdog(
    struct ifnet *ifp)
{
    /*
     * No progress was made on the transmit queue for PDQ_OS_TX_TIMEOUT
     * seconds.  Remove all queued packets.
     */

    ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
    ifp->if_timer = 0;
    for (;;) {
        struct mbuf *m;
        IFQ_DEQUEUE(&ifp->if_snd, m);
        if (m == NULL)
            return;
        PDQ_OS_DATABUF_FREE(PDQ_OS_IFP_TO_SOFTC(ifp)->sc_pdq, m);
    }
}

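/*
 * pdq_ifstart:
 *	if_start handler.  Arms the if_timer watchdog, then drains
 *	if_snd, writing the three PDQ packet-header octets
 *	(PDQ_FDDI_PH0..PH2) at the front of the frame when
 *	PDQ_OS_HDR_OFFSET differs from PDQ_RX_FC_OFFSET, and hands each
 *	mbuf to pdq_queue_transmit_data().  If the transmit ring fills,
 *	the current mbuf is pushed back and IFF_DRV_OACTIVE is set; the
 *	producer index is written out (PDQ_DO_TYPE2_PRODUCER) once at
 *	the end if anything was queued.  PDQIF_DOWNCALL keeps
 *	pdq_os_restart_transmitter() from re-entering this routine.
 */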
static void
pdq_ifstart(
    struct ifnet *ifp)
{
    pdq_softc_t * const sc = PDQ_OS_IFP_TO_SOFTC(ifp);
    struct mbuf *m;
    int tx = 0;

    if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
        return;

    if (PDQ_IFNET(sc)->if_timer == 0)
        PDQ_IFNET(sc)->if_timer = PDQ_OS_TX_TIMEOUT;

    if ((sc->sc_pdq->pdq_flags & PDQ_TXOK) == 0) {
        PDQ_IFNET(sc)->if_drv_flags |= IFF_DRV_OACTIVE;
        return;
    }
    sc->sc_flags |= PDQIF_DOWNCALL;
    for (;; tx = 1) {
        IF_DEQUEUE(&ifp->if_snd, m);
        if (m == NULL)
            break;
#if defined(PDQ_BUS_DMA) && !defined(PDQ_BUS_DMA_NOTX)
        if ((m->m_flags & M_HASTXDMAMAP) == 0) {
            bus_dmamap_t map;
            if (PDQ_OS_HDR_OFFSET != PDQ_RX_FC_OFFSET) {
                m->m_data[0] = PDQ_FDDI_PH0;
                m->m_data[1] = PDQ_FDDI_PH1;
                m->m_data[2] = PDQ_FDDI_PH2;
            }
            if (!bus_dmamap_create(sc->sc_dmatag, m->m_pkthdr.len, 255,
                                   m->m_pkthdr.len, 0, BUS_DMA_NOWAIT, &map)) {
                if (!bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
                                          BUS_DMA_WRITE|BUS_DMA_NOWAIT)) {
                    bus_dmamap_sync(sc->sc_dmatag, map, 0, m->m_pkthdr.len,
                                    BUS_DMASYNC_PREWRITE);
                    M_SETCTX(m, map);
                    m->m_flags |= M_HASTXDMAMAP;
                }
            }
            if ((m->m_flags & M_HASTXDMAMAP) == 0)
                break;
        }
#else
        if (PDQ_OS_HDR_OFFSET != PDQ_RX_FC_OFFSET) {
            m->m_data[0] = PDQ_FDDI_PH0;
            m->m_data[1] = PDQ_FDDI_PH1;
            m->m_data[2] = PDQ_FDDI_PH2;
        }
#endif

        if (pdq_queue_transmit_data(sc->sc_pdq, m) == PDQ_FALSE)
            break;
    }
    if (m != NULL) {
        ifp->if_drv_flags |= IFF_DRV_OACTIVE;
        IF_PREPEND(&ifp->if_snd, m);
    }
    if (tx)
        PDQ_DO_TYPE2_PRODUCER(sc->sc_pdq);
    sc->sc_flags &= ~PDQIF_DOWNCALL;
}

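/*
 * pdq_os_receive_pdu:
 *	Receive upcall from the chip-independent code.  Under
 *	PDQ_BUS_DMA the receive DMA maps are synced, unloaded and
 *	destroyed first.  Frames flagged for dropping, or whose
 *	frame-control byte is not LLC async, are counted as drops and
 *	errors and freed; everything else is tagged with the receiving
 *	ifp and handed to (*ifp->if_input)() (normally fddi_input()).
 */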
void
pdq_os_receive_pdu(
    pdq_t *pdq,
    struct mbuf *m,
    size_t pktlen,
    int drop)
{
    pdq_softc_t *sc = pdq->pdq_os_ctx;
    struct ifnet *ifp = PDQ_IFNET(sc);
    struct fddi_header *fh;

    ifp->if_ipackets++;
#if defined(PDQ_BUS_DMA)
    {
        /*
         * Even though the first mbuf starts at the first FDDI header octet,
         * the dmamap starts PDQ_OS_HDR_OFFSET octets earlier.  Any additional
         * mbufs will start normally.
         */
        int offset = PDQ_OS_HDR_OFFSET;
        struct mbuf *m0;
        for (m0 = m; m0 != NULL; m0 = m0->m_next, offset = 0) {
            pdq_os_databuf_sync(sc, m0, offset, m0->m_len, BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(sc->sc_dmatag, M_GETCTX(m0, bus_dmamap_t));
            bus_dmamap_destroy(sc->sc_dmatag, M_GETCTX(m0, bus_dmamap_t));
            m0->m_flags &= ~M_HASRXDMAMAP;
            M_SETCTX(m0, NULL);
        }
    }
#endif
    m->m_pkthdr.len = pktlen;
    fh = mtod(m, struct fddi_header *);
    if (drop || (fh->fddi_fc & (FDDIFC_L|FDDIFC_F)) != FDDIFC_LLC_ASYNC) {
        ifp->if_iqdrops++;
        ifp->if_ierrors++;
        PDQ_OS_DATABUF_FREE(pdq, m);
        return;
    }

    m->m_pkthdr.rcvif = ifp;
    (*ifp->if_input)(ifp, m);
}

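/*
 * pdq_os_restart_transmitter:
 *	Called by the chip-independent code when transmit resources
 *	become available again.  Clear IFF_DRV_OACTIVE and, if the send
 *	queue is non-empty, rearm the watchdog and kick pdq_ifstart()
 *	(unless we are already inside it, per PDQIF_DOWNCALL).
 */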
void
pdq_os_restart_transmitter(
    pdq_t *pdq)
{
    pdq_softc_t *sc = pdq->pdq_os_ctx;
    PDQ_IFNET(sc)->if_drv_flags &= ~IFF_DRV_OACTIVE;
    if (IFQ_IS_EMPTY(&PDQ_IFNET(sc)->if_snd) == 0) {
        PDQ_IFNET(sc)->if_timer = PDQ_OS_TX_TIMEOUT;
        if ((sc->sc_flags & PDQIF_DOWNCALL) == 0)
            pdq_ifstart(PDQ_IFNET(sc));
    } else {
        PDQ_IFNET(sc)->if_timer = 0;
    }
}

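/*
 * pdq_os_transmit_done:
 *	Transmit-complete upcall: tap the frame to BPF (only when
 *	NBPFILTER is defined, which does not appear to be the case in
 *	the FreeBSD build), free the mbuf and bump the output counter.
 */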
void
pdq_os_transmit_done(
    pdq_t *pdq,
    struct mbuf *m)
{
    pdq_softc_t *sc = pdq->pdq_os_ctx;
#if NBPFILTER > 0
    if (PDQ_IFNET(sc)->if_bpf != NULL)
        PDQ_BPF_MTAP(sc, m);
#endif
    PDQ_OS_DATABUF_FREE(pdq, m);
    PDQ_IFNET(sc)->if_opackets++;
}

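/*
 * pdq_os_addr_fill:
 *	Fill the address array handed in by the chip-independent code
 *	with up to num_addrs link-layer multicast addresses taken from
 *	the interface's multicast list.  If the list does not fit in
 *	the CAM, fall back to all-multicast (PDQ_ALLMULTI/IFF_ALLMULTI).
 */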
void
pdq_os_addr_fill(
    pdq_t *pdq,
    pdq_lanaddr_t *addr,
    size_t num_addrs)
{
    pdq_softc_t *sc = pdq->pdq_os_ctx;
    struct ifnet *ifp;
    struct ifmultiaddr *ifma;

    ifp = sc->ifp;

    /*
     * ADDR_FILTER_SET is always issued before FILTER_SET so
     * we can play with PDQ_ALLMULTI and not worry about
     * queueing a FILTER_SET ourselves.
     */

    pdq->pdq_flags &= ~PDQ_ALLMULTI;
#if defined(IFF_ALLMULTI)
    PDQ_IFNET(sc)->if_flags &= ~IFF_ALLMULTI;
#endif

    IF_ADDR_LOCK(PDQ_IFNET(sc));
    for (ifma = TAILQ_FIRST(&PDQ_IFNET(sc)->if_multiaddrs); ifma && num_addrs > 0;
         ifma = TAILQ_NEXT(ifma, ifma_link)) {
        char *mcaddr;
        if (ifma->ifma_addr->sa_family != AF_LINK)
            continue;
        mcaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
        ((u_short *) addr->lanaddr_bytes)[0] = ((u_short *) mcaddr)[0];
        ((u_short *) addr->lanaddr_bytes)[1] = ((u_short *) mcaddr)[1];
        ((u_short *) addr->lanaddr_bytes)[2] = ((u_short *) mcaddr)[2];
        addr++;
        num_addrs--;
    }
    IF_ADDR_UNLOCK(PDQ_IFNET(sc));
    /*
     * If not all the addresses fit into the CAM, turn on all-multicast mode.
     */
    if (ifma != NULL) {
        pdq->pdq_flags |= PDQ_ALLMULTI;
#if defined(IFF_ALLMULTI)
        PDQ_IFNET(sc)->if_flags |= IFF_ALLMULTI;
#endif
    }
}

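/*
 * Optional ifmedia support (only when IFM_FDDI exists): report ring
 * and duplex state, and let media changes toggle the full-duplex
 * request (PDQ_WANT_FDX), re-running the chip if it is active.
 */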
#if defined(IFM_FDDI)
static int
pdq_ifmedia_change(
    struct ifnet *ifp)
{
    pdq_softc_t * const sc = PDQ_OS_IFP_TO_SOFTC(ifp);

    if (sc->sc_ifmedia.ifm_media & IFM_FDX) {
        if ((sc->sc_pdq->pdq_flags & PDQ_WANT_FDX) == 0) {
            sc->sc_pdq->pdq_flags |= PDQ_WANT_FDX;
            if (sc->sc_pdq->pdq_flags & PDQ_RUNNING)
                pdq_run(sc->sc_pdq);
        }
    } else if (sc->sc_pdq->pdq_flags & PDQ_WANT_FDX) {
        sc->sc_pdq->pdq_flags &= ~PDQ_WANT_FDX;
        if (sc->sc_pdq->pdq_flags & PDQ_RUNNING)
            pdq_run(sc->sc_pdq);
    }

    return 0;
}

static void
pdq_ifmedia_status(
    struct ifnet *ifp,
    struct ifmediareq *ifmr)
{
    pdq_softc_t * const sc = PDQ_OS_IFP_TO_SOFTC(ifp);

    ifmr->ifm_status = IFM_AVALID;
    if (sc->sc_pdq->pdq_flags & PDQ_IS_ONRING)
        ifmr->ifm_status |= IFM_ACTIVE;

    ifmr->ifm_active = (ifmr->ifm_current & ~IFM_FDX);
    if (sc->sc_pdq->pdq_flags & PDQ_IS_FDX)
        ifmr->ifm_active |= IFM_FDX;
}

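/*
 * pdq_os_update_status:
 *	Status upcall: translate the PMD type reported by the adapter
 *	into an ifmedia subtype (MMF/SMF/UTP, otherwise IFM_MANUAL) and
 *	record DAS attachment, so pdq_ifmedia_status() has something to
 *	report.  The misspelled ..._MUTLI_MODE and ..._SIGNLE_MODE_TYPE_2
 *	names below are kept as-is; they apparently match the
 *	identifiers defined in pdqreg.h.
 */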
void
pdq_os_update_status(
    pdq_t *pdq,
    const void *arg)
{
    pdq_softc_t * const sc = pdq->pdq_os_ctx;
    const pdq_response_status_chars_get_t *rsp = arg;
    int media = 0;

    switch (rsp->status_chars_get.pmd_type[0]) {
        case PDQ_PMD_TYPE_ANSI_MUTLI_MODE:         media = IFM_FDDI_MMF; break;
        case PDQ_PMD_TYPE_ANSI_SINGLE_MODE_TYPE_1: media = IFM_FDDI_SMF; break;
        case PDQ_PMD_TYPE_ANSI_SIGNLE_MODE_TYPE_2: media = IFM_FDDI_SMF; break;
        case PDQ_PMD_TYPE_UNSHIELDED_TWISTED_PAIR: media = IFM_FDDI_UTP; break;
        default: media |= IFM_MANUAL;
    }

    if (rsp->status_chars_get.station_type == PDQ_STATION_TYPE_DAS)
        media |= IFM_FDDI_DA;

    sc->sc_ifmedia.ifm_media = media | IFM_FDDI;
}
#endif /* defined(IFM_FDDI) */

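/*
 * pdq_ifioctl:
 *	ioctl handler.  SIOCSIFFLAGS re-runs pdq_ifinit(); multicast
 *	list changes just re-run the chip (the address filter is
 *	presumably reloaded via pdq_os_addr_fill() from pdq_run());
 *	media ioctls go to ifmedia_ioctl(); everything else is left to
 *	fddi_ioctl().  The softc mutex is held across all of this.
 */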
static int
pdq_ifioctl(
    struct ifnet *ifp,
    u_long cmd,
    caddr_t data)
{
    pdq_softc_t *sc = PDQ_OS_IFP_TO_SOFTC(ifp);
    int error = 0;

    PDQ_LOCK(sc);

    switch (cmd) {
        case SIOCSIFFLAGS: {
            pdq_ifinit(sc);
            break;
        }

        case SIOCADDMULTI:
        case SIOCDELMULTI: {
            if (PDQ_IFNET(sc)->if_drv_flags & IFF_DRV_RUNNING) {
                pdq_run(sc->sc_pdq);
                error = 0;
            }
            break;
        }

#if defined(IFM_FDDI) && defined(SIOCSIFMEDIA)
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA: {
            struct ifreq *ifr = (struct ifreq *)data;
            error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, cmd);
            break;
        }
#endif

        default: {
            error = fddi_ioctl(ifp, cmd, data);
            break;
        }
    }

    PDQ_UNLOCK(sc);
    return error;
}

#ifndef IFF_NOTRAILERS
#define	IFF_NOTRAILERS	0
#endif

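/*
 * pdq_ifattach:
 *	Allocate and initialize the ifnet, set up the driver entry
 *	points and the (recursive) softc mutex, register the ifmedia
 *	handlers when IFM_FDDI is available, and attach the interface
 *	to the FDDI/BPF layer via fddi_ifattach().
 */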
void
pdq_ifattach(pdq_softc_t *sc)
{
    struct ifnet *ifp;

    ifp = PDQ_IFNET(sc) = if_alloc(IFT_FDDI);
    if (ifp == NULL)
        panic("%s: can not if_alloc()", device_get_nameunit(sc->dev));

    mtx_init(&sc->mtx, device_get_nameunit(sc->dev), MTX_NETWORK_LOCK,
        MTX_DEF | MTX_RECURSE);

    ifp->if_softc = sc;
    ifp->if_init = (if_init_f_t *)pdq_ifinit;
    ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
    ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_NOTRAILERS|IFF_MULTICAST;

    ifp->if_watchdog = pdq_ifwatchdog;

    ifp->if_ioctl = pdq_ifioctl;
    ifp->if_start = pdq_ifstart;

#if defined(IFM_FDDI)
    {
        const int media = sc->sc_ifmedia.ifm_media;
        ifmedia_init(&sc->sc_ifmedia, IFM_FDX,
                     pdq_ifmedia_change, pdq_ifmedia_status);
        ifmedia_add(&sc->sc_ifmedia, media, 0, 0);
        ifmedia_set(&sc->sc_ifmedia, media);
    }
#endif

    fddi_ifattach(ifp, FDDI_BPF_SUPPORTED);
}

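/*
 * pdq_ifdetach:
 *	Detach from the FDDI/BPF layer, free the ifnet, stop the chip
 *	and release all bus resources via pdq_free().
 */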
void
pdq_ifdetach (pdq_softc_t *sc)
{
    struct ifnet *ifp;

    ifp = sc->ifp;

    fddi_ifdetach(ifp, FDDI_BPF_SUPPORTED);
    if_free(ifp);
    pdq_stop(sc->sc_pdq);
    pdq_free(sc->dev);

    return;
}

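/*
 * pdq_free:
 *	Release whichever bus resources (I/O, memory, interrupt) were
 *	allocated by the bus front-end, and destroy the softc mutex if
 *	it was initialized.  Every teardown is guarded by a check, so
 *	this can also be used from attach failure paths.
 */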
void
pdq_free (device_t dev)
{
        pdq_softc_t *sc;

        sc = device_get_softc(dev);

        if (sc->io)
                bus_release_resource(dev, sc->io_type, sc->io_rid, sc->io);
        if (sc->mem)
                bus_release_resource(dev, sc->mem_type, sc->mem_rid, sc->mem);
        if (sc->irq_ih)
                bus_teardown_intr(dev, sc->irq, sc->irq_ih);
        if (sc->irq)
                bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);

        /*
         * Destroy the mutex.
         */
        if (mtx_initialized(&sc->mtx) != 0) {
                mtx_destroy(&sc->mtx);
        }

        return;
}

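/*
 * pdq_os_memalloc_contig:
 *	Allocate and DMA-map the shared host memory: the descriptor
 *	block, the unsolicited-event buffer, and the consumer block
 *	(which lives inside the descriptor block allocation).  Each
 *	successful step bumps "steps" so that on failure the switch at
 *	the end unwinds exactly what was set up, falling through from
 *	the last completed step back to the first.  Note this
 *	PDQ_BUS_DMA code is written against the NetBSD bus_dma
 *	interfaces (bus_dmamem_map(), dm_segs, ...) and does not appear
 *	to be compiled in the FreeBSD build, where PDQ_BUS_DMA is left
 *	undefined.
 */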
#if defined(PDQ_BUS_DMA)
int
pdq_os_memalloc_contig(
    pdq_t *pdq)
{
    pdq_softc_t * const sc = pdq->pdq_os_ctx;
    bus_dma_segment_t db_segs[1], ui_segs[1], cb_segs[1];
    int db_nsegs = 0, ui_nsegs = 0;
    int steps = 0;
    int not_ok;

    not_ok = bus_dmamem_alloc(sc->sc_dmatag,
                         sizeof(*pdq->pdq_dbp), sizeof(*pdq->pdq_dbp),
                         sizeof(*pdq->pdq_dbp), db_segs, 1, &db_nsegs,
                         BUS_DMA_NOWAIT);
    if (!not_ok) {
        steps = 1;
        not_ok = bus_dmamem_map(sc->sc_dmatag, db_segs, db_nsegs,
                                sizeof(*pdq->pdq_dbp), (caddr_t *) &pdq->pdq_dbp,
                                BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
        steps = 2;
        not_ok = bus_dmamap_create(sc->sc_dmatag, db_segs[0].ds_len, 1,
                                   0x2000, 0, BUS_DMA_NOWAIT, &sc->sc_dbmap);
    }
    if (!not_ok) {
        steps = 3;
        not_ok = bus_dmamap_load(sc->sc_dmatag, sc->sc_dbmap,
                                 pdq->pdq_dbp, sizeof(*pdq->pdq_dbp),
                                 NULL, BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
        steps = 4;
        pdq->pdq_pa_descriptor_block = sc->sc_dbmap->dm_segs[0].ds_addr;
        not_ok = bus_dmamem_alloc(sc->sc_dmatag,
                         PDQ_OS_PAGESIZE, PDQ_OS_PAGESIZE, PDQ_OS_PAGESIZE,
                         ui_segs, 1, &ui_nsegs, BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
        steps = 5;
        not_ok = bus_dmamem_map(sc->sc_dmatag, ui_segs, ui_nsegs,
                            PDQ_OS_PAGESIZE,
                            (caddr_t *) &pdq->pdq_unsolicited_info.ui_events,
                            BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
        steps = 6;
        not_ok = bus_dmamap_create(sc->sc_dmatag, ui_segs[0].ds_len, 1,
                                   PDQ_OS_PAGESIZE, 0, BUS_DMA_NOWAIT,
                                   &sc->sc_uimap);
    }
    if (!not_ok) {
        steps = 7;
        not_ok = bus_dmamap_load(sc->sc_dmatag, sc->sc_uimap,
                                 pdq->pdq_unsolicited_info.ui_events,
                                 PDQ_OS_PAGESIZE, NULL, BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
        steps = 8;
        pdq->pdq_unsolicited_info.ui_pa_bufstart = sc->sc_uimap->dm_segs[0].ds_addr;
        cb_segs[0] = db_segs[0];
        cb_segs[0].ds_addr += offsetof(pdq_descriptor_block_t, pdqdb_consumer);
        cb_segs[0].ds_len = sizeof(pdq_consumer_block_t);
        not_ok = bus_dmamem_map(sc->sc_dmatag, cb_segs, 1,
                                sizeof(*pdq->pdq_cbp), (caddr_t *) &pdq->pdq_cbp,
                                BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
    }
    if (!not_ok) {
        steps = 9;
        not_ok = bus_dmamap_create(sc->sc_dmatag, cb_segs[0].ds_len, 1,
                                   0x2000, 0, BUS_DMA_NOWAIT, &sc->sc_cbmap);
    }
    if (!not_ok) {
        steps = 10;
        not_ok = bus_dmamap_load(sc->sc_dmatag, sc->sc_cbmap,
                                 (caddr_t) pdq->pdq_cbp, sizeof(*pdq->pdq_cbp),
                                 NULL, BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
        pdq->pdq_pa_consumer_block = sc->sc_cbmap->dm_segs[0].ds_addr;
        return not_ok;
    }

    switch (steps) {
        case 11: {
            bus_dmamap_unload(sc->sc_dmatag, sc->sc_cbmap);
            /* FALL THROUGH */
        }
        case 10: {
            bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cbmap);
            /* FALL THROUGH */
        }
        case 9: {
            bus_dmamem_unmap(sc->sc_dmatag,
                             (caddr_t) pdq->pdq_cbp, sizeof(*pdq->pdq_cbp));
            /* FALL THROUGH */
        }
        case 8: {
            bus_dmamap_unload(sc->sc_dmatag, sc->sc_uimap);
            /* FALL THROUGH */
        }
        case 7: {
            bus_dmamap_destroy(sc->sc_dmatag, sc->sc_uimap);
            /* FALL THROUGH */
        }
        case 6: {
            bus_dmamem_unmap(sc->sc_dmatag,
                             (caddr_t) pdq->pdq_unsolicited_info.ui_events,
                             PDQ_OS_PAGESIZE);
            /* FALL THROUGH */
        }
        case 5: {
            bus_dmamem_free(sc->sc_dmatag, ui_segs, ui_nsegs);
            /* FALL THROUGH */
        }
        case 4: {
            bus_dmamap_unload(sc->sc_dmatag, sc->sc_dbmap);
            /* FALL THROUGH */
        }
        case 3: {
            bus_dmamap_destroy(sc->sc_dmatag, sc->sc_dbmap);
            /* FALL THROUGH */
        }
        case 2: {
            bus_dmamem_unmap(sc->sc_dmatag,
                             (caddr_t) pdq->pdq_dbp,
                             sizeof(*pdq->pdq_dbp));
            /* FALL THROUGH */
        }
        case 1: {
            bus_dmamem_free(sc->sc_dmatag, db_segs, db_nsegs);
            /* FALL THROUGH */
        }
    }

    return not_ok;
}

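/*
 * Thin wrappers used by the chip-independent code to bus_dmamap_sync()
 * the descriptor block, the consumer block, the unsolicited-event
 * buffer and individual receive/transmit data buffers.
 */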
extern void
pdq_os_descriptor_block_sync(
    pdq_os_ctx_t *sc,
    size_t offset,
    size_t length,
    int ops)
{
    bus_dmamap_sync(sc->sc_dmatag, sc->sc_dbmap, offset, length, ops);
}

extern void
pdq_os_consumer_block_sync(
    pdq_os_ctx_t *sc,
    int ops)
{
    bus_dmamap_sync(sc->sc_dmatag, sc->sc_cbmap, 0, sizeof(pdq_consumer_block_t), ops);
}

extern void
pdq_os_unsolicited_event_sync(
    pdq_os_ctx_t *sc,
    size_t offset,
    size_t length,
    int ops)
{
    bus_dmamap_sync(sc->sc_dmatag, sc->sc_uimap, offset, length, ops);
}

extern void
pdq_os_databuf_sync(
    pdq_os_ctx_t *sc,
    struct mbuf *m,
    size_t offset,
    size_t length,
    int ops)
{
    bus_dmamap_sync(sc->sc_dmatag, M_GETCTX(m, bus_dmamap_t), offset, length, ops);
}

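/*
 * pdq_os_databuf_free:
 *	Free a data buffer mbuf, tearing down its RX/TX DMA map first
 *	if one is attached.
 */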
extern void
pdq_os_databuf_free(
    pdq_os_ctx_t *sc,
    struct mbuf *m)
{
    if (m->m_flags & (M_HASRXDMAMAP|M_HASTXDMAMAP)) {
        bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
        bus_dmamap_unload(sc->sc_dmatag, map);
        bus_dmamap_destroy(sc->sc_dmatag, map);
        m->m_flags &= ~(M_HASRXDMAMAP|M_HASTXDMAMAP);
    }
    m_freem(m);
}

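/*
 * pdq_os_databuf_alloc:
 *	Allocate a cluster mbuf for the receive ring and create and
 *	load a DMA map for it (flagged with M_HASRXDMAMAP and stashed
 *	via M_SETCTX) so pdq_os_receive_pdu() can sync and unload it
 *	later.
 */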
extern struct mbuf *
pdq_os_databuf_alloc(
    pdq_os_ctx_t *sc)
{
    struct mbuf *m;
    bus_dmamap_t map;

    MGETHDR(m, M_DONTWAIT, MT_DATA);
    if (m == NULL) {
        printf("%s: can't alloc small buf\n", sc->sc_dev.dv_xname);
        return NULL;
    }
    MCLGET(m, M_DONTWAIT);
    if ((m->m_flags & M_EXT) == 0) {
        printf("%s: can't alloc cluster\n", sc->sc_dev.dv_xname);
        m_free(m);
        return NULL;
    }
    m->m_pkthdr.len = m->m_len = PDQ_OS_DATABUF_SIZE;

    if (bus_dmamap_create(sc->sc_dmatag, PDQ_OS_DATABUF_SIZE,
                          1, PDQ_OS_DATABUF_SIZE, 0, BUS_DMA_NOWAIT, &map)) {
        printf("%s: can't create dmamap\n", sc->sc_dev.dv_xname);
        m_free(m);
        return NULL;
    }
    if (bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
                             BUS_DMA_READ|BUS_DMA_NOWAIT)) {
        printf("%s: can't load dmamap\n", sc->sc_dev.dv_xname);
        bus_dmamap_destroy(sc->sc_dmatag, map);
        m_free(m);
        return NULL;
    }
    m->m_flags |= M_HASRXDMAMAP;
    M_SETCTX(m, map);
    return m;
}
#endif
