/*	$NetBSD: pdq_ifsubr.c,v 1.38 2001/12/21 23:21:47 matt Exp $	*/

/*-
 * Copyright (c) 1995, 1996 Matt Thomas <matt@3am-software.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pdq_ifsubr.c,v 1.12 1997/06/05 01:56:35 thomas Exp$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/pdq/pdq_ifsubr.c 144044 2005-03-24 01:58:20Z mdodd $");

/*
 * DEC PDQ FDDI Controller; code for BSD derived operating systems
 *
 *	This module provides bus-independent, BSD-specific O/S functions
 *	(i.e. it provides an ifnet interface to the rest of the system).
 */

#ifdef __NetBSD__
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pdq_ifsubr.c,v 1.38 2001/12/21 23:21:47 matt Exp $");
#endif

#define PDQ_OSSUPPORT

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <sys/module.h>
#include <sys/bus.h>

#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/fddi.h>

#include <net/bpf.h>

#include <dev/pdq/pdq_freebsd.h>
#include <dev/pdq/pdqreg.h>

devclass_t pdq_devclass;

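/*
 * pdq_ifinit:
 *
 *	Bring the interface up or down to match IFF_UP.  On the way up,
 *	IFF_PROMISC is mirrored into PDQ_PROMISC and IFF_LINK1 is mapped
 *	to PDQ_PASS_SMT before the port driver is (re)started with
 *	pdq_run(); otherwise the adapter is stopped with pdq_stop().
 */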
static void
pdq_ifinit(
    pdq_softc_t *sc)
{
    if (sc->sc_if.if_flags & IFF_UP) {
	sc->sc_if.if_flags |= IFF_RUNNING;
	if (sc->sc_if.if_flags & IFF_PROMISC) {
	    sc->sc_pdq->pdq_flags |= PDQ_PROMISC;
	} else {
	    sc->sc_pdq->pdq_flags &= ~PDQ_PROMISC;
	}
	if (sc->sc_if.if_flags & IFF_LINK1) {
	    sc->sc_pdq->pdq_flags |= PDQ_PASS_SMT;
	} else {
	    sc->sc_pdq->pdq_flags &= ~PDQ_PASS_SMT;
	}
	sc->sc_pdq->pdq_flags |= PDQ_RUNNING;
	pdq_run(sc->sc_pdq);
    } else {
	sc->sc_if.if_flags &= ~IFF_RUNNING;
	sc->sc_pdq->pdq_flags &= ~PDQ_RUNNING;
	pdq_stop(sc->sc_pdq);
    }
}

static void
pdq_ifwatchdog(
    struct ifnet *ifp)
{
    /*
     * No progress was made on the transmit queue for PDQ_OS_TX_TIMEOUT
     * seconds.  Remove all queued packets.
     */

    ifp->if_flags &= ~IFF_OACTIVE;
    ifp->if_timer = 0;
    for (;;) {
	struct mbuf *m;
	IFQ_DEQUEUE(&ifp->if_snd, m);
	if (m == NULL)
	    return;
	PDQ_OS_DATABUF_FREE(PDQ_OS_IFP_TO_SOFTC(ifp)->sc_pdq, m);
    }
}

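/*
 * pdq_ifstart:
 *
 *	Interface output start routine.  Drains the interface send queue,
 *	hands each packet to pdq_queue_transmit_data(), and issues
 *	PDQ_DO_TYPE2_PRODUCER() once at the end if anything was queued.
 *	If the adapter cannot take more data (PDQ_TXOK clear, or a queue
 *	full/DMA map failure), IFF_OACTIVE is set and any dequeued packet
 *	is put back on the send queue.
 */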
static void
pdq_ifstart(
    struct ifnet *ifp)
{
    pdq_softc_t * const sc = PDQ_OS_IFP_TO_SOFTC(ifp);
    struct mbuf *m;
    int tx = 0;

    if ((ifp->if_flags & IFF_RUNNING) == 0)
	return;

    if (sc->sc_if.if_timer == 0)
	sc->sc_if.if_timer = PDQ_OS_TX_TIMEOUT;

    if ((sc->sc_pdq->pdq_flags & PDQ_TXOK) == 0) {
	sc->sc_if.if_flags |= IFF_OACTIVE;
	return;
    }
    sc->sc_flags |= PDQIF_DOWNCALL;
    for (;; tx = 1) {
	IF_DEQUEUE(&ifp->if_snd, m);
	if (m == NULL)
	    break;
#if defined(PDQ_BUS_DMA) && !defined(PDQ_BUS_DMA_NOTX)
	if ((m->m_flags & M_HASTXDMAMAP) == 0) {
	    bus_dmamap_t map;
	    if (PDQ_OS_HDR_OFFSET != PDQ_RX_FC_OFFSET) {
		m->m_data[0] = PDQ_FDDI_PH0;
		m->m_data[1] = PDQ_FDDI_PH1;
		m->m_data[2] = PDQ_FDDI_PH2;
	    }
	    if (!bus_dmamap_create(sc->sc_dmatag, m->m_pkthdr.len, 255,
				   m->m_pkthdr.len, 0, BUS_DMA_NOWAIT, &map)) {
		if (!bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
					  BUS_DMA_WRITE|BUS_DMA_NOWAIT)) {
		    bus_dmamap_sync(sc->sc_dmatag, map, 0, m->m_pkthdr.len,
				    BUS_DMASYNC_PREWRITE);
		    M_SETCTX(m, map);
		    m->m_flags |= M_HASTXDMAMAP;
		}
	    }
	    if ((m->m_flags & M_HASTXDMAMAP) == 0)
		break;
	}
#else
	if (PDQ_OS_HDR_OFFSET != PDQ_RX_FC_OFFSET) {
	    m->m_data[0] = PDQ_FDDI_PH0;
	    m->m_data[1] = PDQ_FDDI_PH1;
	    m->m_data[2] = PDQ_FDDI_PH2;
	}
#endif

	if (pdq_queue_transmit_data(sc->sc_pdq, m) == PDQ_FALSE)
	    break;
    }
    if (m != NULL) {
	ifp->if_flags |= IFF_OACTIVE;
	IF_PREPEND(&ifp->if_snd, m);
    }
    if (tx)
	PDQ_DO_TYPE2_PRODUCER(sc->sc_pdq);
    sc->sc_flags &= ~PDQIF_DOWNCALL;
}

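/*
 * pdq_os_receive_pdu:
 *
 *	Called by the PDQ core when a receive completes.  Syncs and tears
 *	down any receive DMA maps, taps BPF (in the NetBSD build), and
 *	either drops the frame (on error, or anything other than async
 *	LLC) or hands it up through if_input.
 */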
void
pdq_os_receive_pdu(
    pdq_t *pdq,
    struct mbuf *m,
    size_t pktlen,
    int drop)
{
    pdq_softc_t *sc = pdq->pdq_os_ctx;
    struct ifnet *ifp = &sc->sc_if;
    struct fddi_header *fh;

    ifp->if_ipackets++;
#if defined(PDQ_BUS_DMA)
    {
	/*
	 * Even though the first mbuf starts at the first fddi header octet,
	 * the dmamap starts PDQ_OS_HDR_OFFSET octets earlier.  Any additional
	 * mbufs will start normally.
	 */
	int offset = PDQ_OS_HDR_OFFSET;
	struct mbuf *m0;
	for (m0 = m; m0 != NULL; m0 = m0->m_next, offset = 0) {
	    pdq_os_databuf_sync(sc, m0, offset, m0->m_len, BUS_DMASYNC_POSTREAD);
	    bus_dmamap_unload(sc->sc_dmatag, M_GETCTX(m0, bus_dmamap_t));
	    bus_dmamap_destroy(sc->sc_dmatag, M_GETCTX(m0, bus_dmamap_t));
	    m0->m_flags &= ~M_HASRXDMAMAP;
	    M_SETCTX(m0, NULL);
	}
    }
#endif
    m->m_pkthdr.len = pktlen;
#if NBPFILTER > 0 && defined(__NetBSD__)
    if (sc->sc_bpf != NULL)
	PDQ_BPF_MTAP(sc, m);
#endif
    fh = mtod(m, struct fddi_header *);
    if (drop || (fh->fddi_fc & (FDDIFC_L|FDDIFC_F)) != FDDIFC_LLC_ASYNC) {
	ifp->if_iqdrops++;
	ifp->if_ierrors++;
	PDQ_OS_DATABUF_FREE(pdq, m);
	return;
    }

    m->m_pkthdr.rcvif = ifp;
    (*ifp->if_input)(ifp, m);
}

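/*
 * pdq_os_restart_transmitter:
 *
 *	Called by the PDQ core when transmit resources become available
 *	again.  Clears IFF_OACTIVE and, if packets are still queued,
 *	rearms the watchdog and kicks pdq_ifstart() (unless we are
 *	already inside a downcall from it).
 */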
void
pdq_os_restart_transmitter(
    pdq_t *pdq)
{
    pdq_softc_t *sc = pdq->pdq_os_ctx;
    sc->sc_if.if_flags &= ~IFF_OACTIVE;
    if (IFQ_IS_EMPTY(&sc->sc_if.if_snd) == 0) {
	sc->sc_if.if_timer = PDQ_OS_TX_TIMEOUT;
	if ((sc->sc_flags & PDQIF_DOWNCALL) == 0)
	    pdq_ifstart(&sc->sc_if);
    } else {
	sc->sc_if.if_timer = 0;
    }
}

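/*
 * pdq_os_transmit_done:
 *
 *	Called by the PDQ core when a transmit completes; taps BPF if
 *	configured, frees the mbuf chain and counts the output packet.
 */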
void
pdq_os_transmit_done(
    pdq_t *pdq,
    struct mbuf *m)
{
    pdq_softc_t *sc = pdq->pdq_os_ctx;
#if NBPFILTER > 0
    if (sc->sc_bpf != NULL)
	PDQ_BPF_MTAP(sc, m);
#endif
    PDQ_OS_DATABUF_FREE(pdq, m);
    sc->sc_if.if_opackets++;
}

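/*
 * pdq_os_addr_fill:
 *
 *	Copy up to num_addrs link-level multicast addresses from the
 *	interface's multicast list into the array used to load the
 *	adapter's CAM.  If the list does not fit, fall back to
 *	all-multicast mode.
 */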
void
pdq_os_addr_fill(
    pdq_t *pdq,
    pdq_lanaddr_t *addr,
    size_t num_addrs)
{
    pdq_softc_t *sc = pdq->pdq_os_ctx;
    struct ifnet *ifp;
    struct ifmultiaddr *ifma;

    ifp = &sc->arpcom.ac_if;

    /*
     * ADDR_FILTER_SET is always issued before FILTER_SET so
     * we can play with PDQ_ALLMULTI and not worry about
     * queueing a FILTER_SET ourselves.
     */

    pdq->pdq_flags &= ~PDQ_ALLMULTI;
#if defined(IFF_ALLMULTI)
    sc->sc_if.if_flags &= ~IFF_ALLMULTI;
#endif

    for (ifma = TAILQ_FIRST(&sc->sc_if.if_multiaddrs); ifma && num_addrs > 0;
	 ifma = TAILQ_NEXT(ifma, ifma_link)) {
	    char *mcaddr;
	    if (ifma->ifma_addr->sa_family != AF_LINK)
		    continue;
	    mcaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
	    ((u_short *) addr->lanaddr_bytes)[0] = ((u_short *) mcaddr)[0];
	    ((u_short *) addr->lanaddr_bytes)[1] = ((u_short *) mcaddr)[1];
	    ((u_short *) addr->lanaddr_bytes)[2] = ((u_short *) mcaddr)[2];
	    addr++;
	    num_addrs--;
    }
    /*
     * If not all the addresses fit into the CAM, turn on all-multicast mode.
     */
    if (ifma != NULL) {
	pdq->pdq_flags |= PDQ_ALLMULTI;
#if defined(IFF_ALLMULTI)
	sc->sc_if.if_flags |= IFF_ALLMULTI;
#endif
    }
}

#if defined(IFM_FDDI)
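/*
 * pdq_ifmedia_change:
 *
 *	ifmedia change callback; only the full-duplex bit is acted on
 *	here.  Toggle PDQ_WANT_FDX to match the requested media and
 *	restart the adapter if it is currently running.
 */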
static int
pdq_ifmedia_change(
    struct ifnet *ifp)
{
    pdq_softc_t * const sc = PDQ_OS_IFP_TO_SOFTC(ifp);

    if (sc->sc_ifmedia.ifm_media & IFM_FDX) {
	if ((sc->sc_pdq->pdq_flags & PDQ_WANT_FDX) == 0) {
	    sc->sc_pdq->pdq_flags |= PDQ_WANT_FDX;
	    if (sc->sc_pdq->pdq_flags & PDQ_RUNNING)
		pdq_run(sc->sc_pdq);
	}
    } else if (sc->sc_pdq->pdq_flags & PDQ_WANT_FDX) {
	sc->sc_pdq->pdq_flags &= ~PDQ_WANT_FDX;
	if (sc->sc_pdq->pdq_flags & PDQ_RUNNING)
	    pdq_run(sc->sc_pdq);
    }

    return 0;
}

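/*
 * pdq_ifmedia_status:
 *
 *	ifmedia status callback: report the link as active when the
 *	station is on the ring, and reflect the current full-duplex
 *	state in the active media word.
 */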
static void
pdq_ifmedia_status(
    struct ifnet *ifp,
    struct ifmediareq *ifmr)
{
    pdq_softc_t * const sc = PDQ_OS_IFP_TO_SOFTC(ifp);

    ifmr->ifm_status = IFM_AVALID;
    if (sc->sc_pdq->pdq_flags & PDQ_IS_ONRING)
	ifmr->ifm_status |= IFM_ACTIVE;

    ifmr->ifm_active = (ifmr->ifm_current & ~IFM_FDX);
    if (sc->sc_pdq->pdq_flags & PDQ_IS_FDX)
	ifmr->ifm_active |= IFM_FDX;
}

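/*
 * pdq_os_update_status:
 *
 *	Called by the PDQ core with a pdq_response_status_chars_get_t;
 *	translate the reported PMD type and station type into the
 *	corresponding IFM_FDDI media word.
 */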
void
pdq_os_update_status(
    pdq_t *pdq,
    const void *arg)
{
    pdq_softc_t * const sc = pdq->pdq_os_ctx;
    const pdq_response_status_chars_get_t *rsp = arg;
    int media = 0;

    switch (rsp->status_chars_get.pmd_type[0]) {
	case PDQ_PMD_TYPE_ANSI_MUTLI_MODE:         media = IFM_FDDI_MMF; break;
	case PDQ_PMD_TYPE_ANSI_SINGLE_MODE_TYPE_1: media = IFM_FDDI_SMF; break;
	case PDQ_PMD_TYPE_ANSI_SIGNLE_MODE_TYPE_2: media = IFM_FDDI_SMF; break;
	case PDQ_PMD_TYPE_UNSHIELDED_TWISTED_PAIR: media = IFM_FDDI_UTP; break;
	default: media |= IFM_MANUAL;
    }

    if (rsp->status_chars_get.station_type == PDQ_STATION_TYPE_DAS)
	media |= IFM_FDDI_DA;

    sc->sc_ifmedia.ifm_media = media | IFM_FDDI;
}
#endif /* defined(IFM_FDDI) */

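/*
 * pdq_ifioctl:
 *
 *	Interface ioctl handler.  SIOCSIFFLAGS re-runs pdq_ifinit();
 *	multicast list changes restart the adapter via pdq_run() (which
 *	presumably reloads the address filter); media ioctls are handed
 *	to ifmedia_ioctl(); anything else falls through to fddi_ioctl().
 */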
static int
pdq_ifioctl(
    struct ifnet *ifp,
    u_long cmd,
    caddr_t data)
{
    pdq_softc_t *sc = PDQ_OS_IFP_TO_SOFTC(ifp);
    int error = 0;

    PDQ_LOCK(sc);

    switch (cmd) {
	case SIOCSIFFLAGS: {
	    pdq_ifinit(sc);
	    break;
	}

	case SIOCADDMULTI:
	case SIOCDELMULTI: {
	    if (sc->sc_if.if_flags & IFF_RUNNING) {
		pdq_run(sc->sc_pdq);
		error = 0;
	    }
	    break;
	}

#if defined(IFM_FDDI) && defined(SIOCSIFMEDIA)
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA: {
	    struct ifreq *ifr = (struct ifreq *)data;
	    error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, cmd);
	    break;
	}
#endif

	default: {
	    error = fddi_ioctl(ifp, cmd, data);
	    break;
	}
    }

    PDQ_UNLOCK(sc);
    return error;
}

#ifndef IFF_NOTRAILERS
#define	IFF_NOTRAILERS	0
#endif

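/*
 * pdq_ifattach:
 *
 *	Set up the ifnet for this unit (init/ioctl/start/watchdog hooks,
 *	interface flags, ifmedia) and attach it to the FDDI layer.  Also
 *	creates the softc mutex used by the bus front ends.
 */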
void
pdq_ifattach(pdq_softc_t *sc)
{
    struct ifnet *ifp = &sc->sc_if;

    mtx_init(&sc->mtx, device_get_nameunit(sc->dev), MTX_NETWORK_LOCK,
	MTX_DEF | MTX_RECURSE);

    ifp->if_softc = sc;
    ifp->if_init = (if_init_f_t *)pdq_ifinit;
    ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
    ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_NOTRAILERS|IFF_MULTICAST;

#if (defined(__FreeBSD__) && BSD >= 199506) || defined(__NetBSD__)
    ifp->if_watchdog = pdq_ifwatchdog;
#else
    ifp->if_watchdog = ifwatchdog;
#endif

    ifp->if_ioctl = pdq_ifioctl;
#if !defined(__NetBSD__) && !defined(__FreeBSD__)
    ifp->if_output = fddi_output;
#endif
    ifp->if_start = pdq_ifstart;

#if defined(IFM_FDDI)
    {
	const int media = sc->sc_ifmedia.ifm_media;
	ifmedia_init(&sc->sc_ifmedia, IFM_FDX,
		     pdq_ifmedia_change, pdq_ifmedia_status);
	ifmedia_add(&sc->sc_ifmedia, media, 0, 0);
	ifmedia_set(&sc->sc_ifmedia, media);
    }
#endif

#if defined(__NetBSD__)
    if_attach(ifp);
    fddi_ifattach(ifp, (caddr_t)&sc->sc_pdq->pdq_hwaddr);
#else
    fddi_ifattach(ifp, FDDI_BPF_SUPPORTED);
#endif
}

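/*
 * pdq_ifdetach:
 *
 *	Detach from the FDDI layer, stop the adapter and release its
 *	bus resources via pdq_free().
 */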
void
pdq_ifdetach (pdq_softc_t *sc)
{
    struct ifnet *ifp;

    ifp = &sc->arpcom.ac_if;

    fddi_ifdetach(ifp, FDDI_BPF_SUPPORTED);
    pdq_stop(sc->sc_pdq);
    pdq_free(sc->dev);

    return;
}

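/*
 * pdq_free:
 *
 *	Release any I/O, memory and interrupt resources held by the
 *	device and destroy the softc mutex if it was initialized.
 */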
void
pdq_free (device_t dev)
{
	pdq_softc_t *sc;

	sc = device_get_softc(dev);

	if (sc->io)
		bus_release_resource(dev, sc->io_type, sc->io_rid, sc->io);
	if (sc->mem)
		bus_release_resource(dev, sc->mem_type, sc->mem_rid, sc->mem);
	if (sc->irq_ih)
		bus_teardown_intr(dev, sc->irq, sc->irq_ih);
	if (sc->irq)
		bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);

	/*
	 * Destroy the mutex.
	 */
	if (mtx_initialized(&sc->mtx) != 0) {
		mtx_destroy(&sc->mtx);
	}

	return;
}

#if defined(PDQ_BUS_DMA)
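/*
 * pdq_os_memalloc_contig:
 *
 *	Allocate, map and load the DMA-able control structures shared
 *	with the adapter: the descriptor block, the unsolicited event
 *	buffer, and the consumer block (which is mapped out of the same
 *	segment as the descriptor block).  On failure, the "steps"
 *	counter records how far we got so the switch below can unwind
 *	in reverse order.
 */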
int
pdq_os_memalloc_contig(
    pdq_t *pdq)
{
    pdq_softc_t * const sc = pdq->pdq_os_ctx;
    bus_dma_segment_t db_segs[1], ui_segs[1], cb_segs[1];
    int db_nsegs = 0, ui_nsegs = 0;
    int steps = 0;
    int not_ok;

    not_ok = bus_dmamem_alloc(sc->sc_dmatag,
			 sizeof(*pdq->pdq_dbp), sizeof(*pdq->pdq_dbp),
			 sizeof(*pdq->pdq_dbp), db_segs, 1, &db_nsegs,
			 BUS_DMA_NOWAIT);
    if (!not_ok) {
	steps = 1;
	not_ok = bus_dmamem_map(sc->sc_dmatag, db_segs, db_nsegs,
				sizeof(*pdq->pdq_dbp), (caddr_t *) &pdq->pdq_dbp,
				BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
	steps = 2;
	not_ok = bus_dmamap_create(sc->sc_dmatag, db_segs[0].ds_len, 1,
				   0x2000, 0, BUS_DMA_NOWAIT, &sc->sc_dbmap);
    }
    if (!not_ok) {
	steps = 3;
	not_ok = bus_dmamap_load(sc->sc_dmatag, sc->sc_dbmap,
				 pdq->pdq_dbp, sizeof(*pdq->pdq_dbp),
				 NULL, BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
	steps = 4;
	pdq->pdq_pa_descriptor_block = sc->sc_dbmap->dm_segs[0].ds_addr;
	not_ok = bus_dmamem_alloc(sc->sc_dmatag,
			 PDQ_OS_PAGESIZE, PDQ_OS_PAGESIZE, PDQ_OS_PAGESIZE,
			 ui_segs, 1, &ui_nsegs, BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
	steps = 5;
	not_ok = bus_dmamem_map(sc->sc_dmatag, ui_segs, ui_nsegs,
			    PDQ_OS_PAGESIZE,
			    (caddr_t *) &pdq->pdq_unsolicited_info.ui_events,
			    BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
	steps = 6;
	not_ok = bus_dmamap_create(sc->sc_dmatag, ui_segs[0].ds_len, 1,
				   PDQ_OS_PAGESIZE, 0, BUS_DMA_NOWAIT,
				   &sc->sc_uimap);
    }
    if (!not_ok) {
	steps = 7;
	not_ok = bus_dmamap_load(sc->sc_dmatag, sc->sc_uimap,
				 pdq->pdq_unsolicited_info.ui_events,
				 PDQ_OS_PAGESIZE, NULL, BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
	steps = 8;
	pdq->pdq_unsolicited_info.ui_pa_bufstart = sc->sc_uimap->dm_segs[0].ds_addr;
	cb_segs[0] = db_segs[0];
	cb_segs[0].ds_addr += offsetof(pdq_descriptor_block_t, pdqdb_consumer);
	cb_segs[0].ds_len = sizeof(pdq_consumer_block_t);
	not_ok = bus_dmamem_map(sc->sc_dmatag, cb_segs, 1,
				sizeof(*pdq->pdq_cbp), (caddr_t *) &pdq->pdq_cbp,
				BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
    }
    if (!not_ok) {
	steps = 9;
	not_ok = bus_dmamap_create(sc->sc_dmatag, cb_segs[0].ds_len, 1,
				   0x2000, 0, BUS_DMA_NOWAIT, &sc->sc_cbmap);
    }
    if (!not_ok) {
	steps = 10;
	not_ok = bus_dmamap_load(sc->sc_dmatag, sc->sc_cbmap,
				 (caddr_t) pdq->pdq_cbp, sizeof(*pdq->pdq_cbp),
				 NULL, BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
	pdq->pdq_pa_consumer_block = sc->sc_cbmap->dm_segs[0].ds_addr;
	return not_ok;
    }

    switch (steps) {
	case 11: {
	    bus_dmamap_unload(sc->sc_dmatag, sc->sc_cbmap);
	    /* FALL THROUGH */
	}
	case 10: {
	    bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cbmap);
	    /* FALL THROUGH */
	}
	case 9: {
	    bus_dmamem_unmap(sc->sc_dmatag,
			     (caddr_t) pdq->pdq_cbp, sizeof(*pdq->pdq_cbp));
	    /* FALL THROUGH */
	}
	case 8: {
	    bus_dmamap_unload(sc->sc_dmatag, sc->sc_uimap);
	    /* FALL THROUGH */
	}
	case 7: {
	    bus_dmamap_destroy(sc->sc_dmatag, sc->sc_uimap);
	    /* FALL THROUGH */
	}
	case 6: {
	    bus_dmamem_unmap(sc->sc_dmatag,
			     (caddr_t) pdq->pdq_unsolicited_info.ui_events,
			     PDQ_OS_PAGESIZE);
	    /* FALL THROUGH */
	}
	case 5: {
	    bus_dmamem_free(sc->sc_dmatag, ui_segs, ui_nsegs);
	    /* FALL THROUGH */
	}
	case 4: {
	    bus_dmamap_unload(sc->sc_dmatag, sc->sc_dbmap);
	    /* FALL THROUGH */
	}
	case 3: {
	    bus_dmamap_destroy(sc->sc_dmatag, sc->sc_dbmap);
	    /* FALL THROUGH */
	}
	case 2: {
	    bus_dmamem_unmap(sc->sc_dmatag,
			     (caddr_t) pdq->pdq_dbp,
			     sizeof(*pdq->pdq_dbp));
	    /* FALL THROUGH */
	}
	case 1: {
	    bus_dmamem_free(sc->sc_dmatag, db_segs, db_nsegs);
	    /* FALL THROUGH */
	}
    }

    return not_ok;
}

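/*
 * DMA synchronization helpers called from the bus-independent PDQ code:
 * each one wraps bus_dmamap_sync() for the descriptor block, the
 * consumer block, the unsolicited event buffer, or an individual
 * receive/transmit data buffer, respectively.
 */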
extern void
pdq_os_descriptor_block_sync(
    pdq_os_ctx_t *sc,
    size_t offset,
    size_t length,
    int ops)
{
    bus_dmamap_sync(sc->sc_dmatag, sc->sc_dbmap, offset, length, ops);
}

extern void
pdq_os_consumer_block_sync(
    pdq_os_ctx_t *sc,
    int ops)
{
    bus_dmamap_sync(sc->sc_dmatag, sc->sc_cbmap, 0, sizeof(pdq_consumer_block_t), ops);
}

extern void
pdq_os_unsolicited_event_sync(
    pdq_os_ctx_t *sc,
    size_t offset,
    size_t length,
    int ops)
{
    bus_dmamap_sync(sc->sc_dmatag, sc->sc_uimap, offset, length, ops);
}

extern void
pdq_os_databuf_sync(
    pdq_os_ctx_t *sc,
    struct mbuf *m,
    size_t offset,
    size_t length,
    int ops)
{
    bus_dmamap_sync(sc->sc_dmatag, M_GETCTX(m, bus_dmamap_t), offset, length, ops);
}

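/*
 * pdq_os_databuf_free:
 *
 *	Unload and destroy any DMA map still attached to the mbuf,
 *	then free the chain.
 */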
extern void
pdq_os_databuf_free(
    pdq_os_ctx_t *sc,
    struct mbuf *m)
{
    if (m->m_flags & (M_HASRXDMAMAP|M_HASTXDMAMAP)) {
	bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
	bus_dmamap_unload(sc->sc_dmatag, map);
	bus_dmamap_destroy(sc->sc_dmatag, map);
	m->m_flags &= ~(M_HASRXDMAMAP|M_HASTXDMAMAP);
    }
    m_freem(m);
}

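/*
 * pdq_os_databuf_alloc:
 *
 *	Allocate a cluster mbuf for receive, create and load a DMA map
 *	for it, and stash the map in the mbuf's context (M_SETCTX) with
 *	M_HASRXDMAMAP set.  Returns NULL on any failure.
 */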
extern struct mbuf *
pdq_os_databuf_alloc(
    pdq_os_ctx_t *sc)
{
    struct mbuf *m;
    bus_dmamap_t map;

    MGETHDR(m, M_DONTWAIT, MT_DATA);
    if (m == NULL) {
	printf("%s: can't alloc small buf\n", sc->sc_dev.dv_xname);
	return NULL;
    }
    MCLGET(m, M_DONTWAIT);
    if ((m->m_flags & M_EXT) == 0) {
	printf("%s: can't alloc cluster\n", sc->sc_dev.dv_xname);
	m_free(m);
	return NULL;
    }
    m->m_pkthdr.len = m->m_len = PDQ_OS_DATABUF_SIZE;

    if (bus_dmamap_create(sc->sc_dmatag, PDQ_OS_DATABUF_SIZE,
			   1, PDQ_OS_DATABUF_SIZE, 0, BUS_DMA_NOWAIT, &map)) {
	printf("%s: can't create dmamap\n", sc->sc_dev.dv_xname);
	m_free(m);
	return NULL;
    }
    if (bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
			     BUS_DMA_READ|BUS_DMA_NOWAIT)) {
	printf("%s: can't load dmamap\n", sc->sc_dev.dv_xname);
	bus_dmamap_destroy(sc->sc_dmatag, map);
	m_free(m);
	return NULL;
    }
    m->m_flags |= M_HASRXDMAMAP;
    M_SETCTX(m, map);
    return m;
}
#endif
