/*	$NetBSD: pdq_ifsubr.c,v 1.38 2001/12/21 23:21:47 matt Exp $	*/

/*-
 * Copyright (c) 1995, 1996 Matt Thomas <matt@3am-software.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pdq_ifsubr.c,v 1.12 1997/06/05 01:56:35 thomas Exp$
 * $FreeBSD: head/sys/dev/pdq/pdq_ifsubr.c 93383 2002-03-29 11:22:22Z mdodd $
 */

/*
 * DEC PDQ FDDI Controller; code for BSD derived operating systems
 *
 *	This module provides bus-independent, BSD-specific O/S functions
 *	(i.e. it provides an ifnet interface to the rest of the system).
 */

#ifdef __NetBSD__
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pdq_ifsubr.c,v 1.38 2001/12/21 23:21:47 matt Exp $");
#endif

#define PDQ_OSSUPPORT

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <sys/module.h>
#include <sys/bus.h>

#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/fddi.h>

#include <net/bpf.h>

#include <dev/pdq/pdq_freebsd.h>
#include <dev/pdq/pdqreg.h>

devclass_t pdq_devclass;

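/*
 * pdq_ifinit:
 *	if_init handler.  If the interface is marked IFF_UP, mirror the
 *	IFF_PROMISC and IFF_LINK1 (pass SMT frames) interface flags into
 *	the PDQ flags and (re)start the controller with pdq_run();
 *	otherwise mark it not running and stop it with pdq_stop().
 */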
static void
pdq_ifinit(
    pdq_softc_t *sc)
{
    if (sc->sc_if.if_flags & IFF_UP) {
	sc->sc_if.if_flags |= IFF_RUNNING;
	if (sc->sc_if.if_flags & IFF_PROMISC) {
	    sc->sc_pdq->pdq_flags |= PDQ_PROMISC;
	} else {
	    sc->sc_pdq->pdq_flags &= ~PDQ_PROMISC;
	}
	if (sc->sc_if.if_flags & IFF_LINK1) {
	    sc->sc_pdq->pdq_flags |= PDQ_PASS_SMT;
	} else {
	    sc->sc_pdq->pdq_flags &= ~PDQ_PASS_SMT;
	}
	sc->sc_pdq->pdq_flags |= PDQ_RUNNING;
	pdq_run(sc->sc_pdq);
    } else {
	sc->sc_if.if_flags &= ~IFF_RUNNING;
	sc->sc_pdq->pdq_flags &= ~PDQ_RUNNING;
	pdq_stop(sc->sc_pdq);
    }
}

static void
pdq_ifwatchdog(
    struct ifnet *ifp)
{
    /*
     * No progress was made on the transmit queue for PDQ_OS_TX_TIMEOUT
     * seconds.  Remove all queued packets.
     */

    ifp->if_flags &= ~IFF_OACTIVE;
    ifp->if_timer = 0;
    for (;;) {
	struct mbuf *m;
	IFQ_DEQUEUE(&ifp->if_snd, m);
	if (m == NULL)
	    return;
	PDQ_OS_DATABUF_FREE(PDQ_OS_IFP_TO_SOFTC(ifp)->sc_pdq, m);
    }
}

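/*
 * pdq_ifstart:
 *	if_start handler.  Drain the interface send queue into the PDQ
 *	transmit ring via pdq_queue_transmit_data().  If the adapter is
 *	not ready for transmits (PDQ_TXOK clear) or the ring fills up,
 *	set IFF_OACTIVE and put the current packet back at the head of
 *	the queue.  The type 2 (transmit) producer is only advanced
 *	once, after the loop, if at least one packet was queued.
 */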
static void
pdq_ifstart(
    struct ifnet *ifp)
{
    pdq_softc_t * const sc = PDQ_OS_IFP_TO_SOFTC(ifp);
    struct mbuf *m;
    int tx = 0;

    if ((ifp->if_flags & IFF_RUNNING) == 0)
	return;

    if (sc->sc_if.if_timer == 0)
	sc->sc_if.if_timer = PDQ_OS_TX_TIMEOUT;

    if ((sc->sc_pdq->pdq_flags & PDQ_TXOK) == 0) {
	sc->sc_if.if_flags |= IFF_OACTIVE;
	return;
    }
    sc->sc_flags |= PDQIF_DOWNCALL;
    for (;; tx = 1) {
	IF_DEQUEUE(&ifp->if_snd, m);
	if (m == NULL)
	    break;
#if defined(PDQ_BUS_DMA) && !defined(PDQ_BUS_DMA_NOTX)
	if ((m->m_flags & M_HASTXDMAMAP) == 0) {
	    bus_dmamap_t map;
	    if (PDQ_OS_HDR_OFFSET != PDQ_RX_FC_OFFSET) {
		m->m_data[0] = PDQ_FDDI_PH0;
		m->m_data[1] = PDQ_FDDI_PH1;
		m->m_data[2] = PDQ_FDDI_PH2;
	    }
	    if (!bus_dmamap_create(sc->sc_dmatag, m->m_pkthdr.len, 255,
				   m->m_pkthdr.len, 0, BUS_DMA_NOWAIT, &map)) {
		if (!bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
					  BUS_DMA_WRITE|BUS_DMA_NOWAIT)) {
		    bus_dmamap_sync(sc->sc_dmatag, map, 0, m->m_pkthdr.len,
				    BUS_DMASYNC_PREWRITE);
		    M_SETCTX(m, map);
		    m->m_flags |= M_HASTXDMAMAP;
		}
	    }
	    if ((m->m_flags & M_HASTXDMAMAP) == 0)
		break;
	}
#else
	if (PDQ_OS_HDR_OFFSET != PDQ_RX_FC_OFFSET) {
	    m->m_data[0] = PDQ_FDDI_PH0;
	    m->m_data[1] = PDQ_FDDI_PH1;
	    m->m_data[2] = PDQ_FDDI_PH2;
	}
#endif

	if (pdq_queue_transmit_data(sc->sc_pdq, m) == PDQ_FALSE)
	    break;
    }
    if (m != NULL) {
	ifp->if_flags |= IFF_OACTIVE;
	IF_PREPEND(&ifp->if_snd, m);
    }
    if (tx)
	PDQ_DO_TYPE2_PRODUCER(sc->sc_pdq);
    sc->sc_flags &= ~PDQIF_DOWNCALL;
}

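/*
 * pdq_os_receive_pdu:
 *	Upcall from the chip-independent PDQ code for a received frame.
 *	Tear down any receive DMA map state, tap bpf if configured,
 *	drop anything the caller flagged as bad or that is not an LLC
 *	async frame, and hand the rest to fddi_input().
 */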
void
pdq_os_receive_pdu(
    pdq_t *pdq,
    struct mbuf *m,
    size_t pktlen,
    int drop)
{
    pdq_softc_t *sc = pdq->pdq_os_ctx;
    struct fddi_header *fh;

    sc->sc_if.if_ipackets++;
#if defined(PDQ_BUS_DMA)
    {
	/*
	 * Even though the first mbuf starts at the first fddi header octet,
	 * the dmamap starts PDQ_OS_HDR_OFFSET octets earlier.  Any additional
	 * mbufs will start normally.
	 */
	int offset = PDQ_OS_HDR_OFFSET;
	struct mbuf *m0;
	for (m0 = m; m0 != NULL; m0 = m0->m_next, offset = 0) {
	    pdq_os_databuf_sync(sc, m0, offset, m0->m_len, BUS_DMASYNC_POSTREAD);
	    bus_dmamap_unload(sc->sc_dmatag, M_GETCTX(m0, bus_dmamap_t));
	    bus_dmamap_destroy(sc->sc_dmatag, M_GETCTX(m0, bus_dmamap_t));
	    m0->m_flags &= ~M_HASRXDMAMAP;
	    M_SETCTX(m0, NULL);
	}
    }
#endif
    m->m_pkthdr.len = pktlen;
#if NBPFILTER > 0
    if (sc->sc_bpf != NULL)
	PDQ_BPF_MTAP(sc, m);
#endif
    fh = mtod(m, struct fddi_header *);
    if (drop || (fh->fddi_fc & (FDDIFC_L|FDDIFC_F)) != FDDIFC_LLC_ASYNC) {
	sc->sc_if.if_iqdrops++;
	sc->sc_if.if_ierrors++;
	PDQ_OS_DATABUF_FREE(pdq, m);
	return;
    }

    m_adj(m, FDDI_HDR_LEN);
    m->m_pkthdr.rcvif = &sc->sc_if;
    fddi_input(&sc->sc_if, fh, m);
}

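/*
 * pdq_os_restart_transmitter:
 *	Called by the PDQ code once transmit resources are available
 *	again.  Clear IFF_OACTIVE and, if packets are still queued,
 *	rearm the watchdog and kick pdq_ifstart() (unless we are
 *	already inside a downcall from it).
 */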
void
pdq_os_restart_transmitter(
    pdq_t *pdq)
{
    pdq_softc_t *sc = pdq->pdq_os_ctx;
    sc->sc_if.if_flags &= ~IFF_OACTIVE;
    if (IFQ_IS_EMPTY(&sc->sc_if.if_snd) == 0) {
	sc->sc_if.if_timer = PDQ_OS_TX_TIMEOUT;
	if ((sc->sc_flags & PDQIF_DOWNCALL) == 0)
	    pdq_ifstart(&sc->sc_if);
    } else {
	sc->sc_if.if_timer = 0;
    }
}

void
pdq_os_transmit_done(
    pdq_t *pdq,
    struct mbuf *m)
{
    pdq_softc_t *sc = pdq->pdq_os_ctx;
#if NBPFILTER > 0
    if (sc->sc_bpf != NULL)
	PDQ_BPF_MTAP(sc, m);
#endif
    PDQ_OS_DATABUF_FREE(pdq, m);
    sc->sc_if.if_opackets++;
}

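/*
 * pdq_os_addr_fill:
 *	Copy up to num_addrs link-layer multicast addresses from the
 *	interface's multicast list into the CAM entry array handed to
 *	us by the PDQ code.  If the list does not fit, fall back to
 *	all-multicast mode by setting PDQ_ALLMULTI (and IFF_ALLMULTI
 *	where it is defined).
 */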
void
pdq_os_addr_fill(
    pdq_t *pdq,
    pdq_lanaddr_t *addr,
    size_t num_addrs)
{
    pdq_softc_t *sc = pdq->pdq_os_ctx;
    struct ifnet *ifp;
    struct ifmultiaddr *ifma;

    ifp = &sc->arpcom.ac_if;

    /*
     * ADDR_FILTER_SET is always issued before FILTER_SET so
     * we can play with PDQ_ALLMULTI and not worry about
     * queueing a FILTER_SET ourselves.
     */

    pdq->pdq_flags &= ~PDQ_ALLMULTI;
#if defined(IFF_ALLMULTI)
    sc->sc_if.if_flags &= ~IFF_ALLMULTI;
#endif

    for (ifma = TAILQ_FIRST(&sc->sc_if.if_multiaddrs); ifma && num_addrs > 0;
	 ifma = TAILQ_NEXT(ifma, ifma_link)) {
	    char *mcaddr;
	    if (ifma->ifma_addr->sa_family != AF_LINK)
		    continue;
	    mcaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
	    ((u_short *) addr->lanaddr_bytes)[0] = ((u_short *) mcaddr)[0];
	    ((u_short *) addr->lanaddr_bytes)[1] = ((u_short *) mcaddr)[1];
	    ((u_short *) addr->lanaddr_bytes)[2] = ((u_short *) mcaddr)[2];
	    addr++;
	    num_addrs--;
    }
    /*
     * If not all the addresses fit into the CAM, turn on all-multicast mode.
     */
    if (ifma != NULL) {
	pdq->pdq_flags |= PDQ_ALLMULTI;
#if defined(IFF_ALLMULTI)
	sc->sc_if.if_flags |= IFF_ALLMULTI;
#endif
    }
}

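/*
 * Optional ifmedia support.  pdq_ifmedia_change() maps the IFM_FDX
 * media flag onto PDQ_WANT_FDX (restarting the adapter if it is
 * running), and pdq_ifmedia_status() reports ring and full-duplex
 * state back from the PDQ flags.
 */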
#if defined(IFM_FDDI)
static int
pdq_ifmedia_change(
    struct ifnet *ifp)
{
    pdq_softc_t * const sc = PDQ_OS_IFP_TO_SOFTC(ifp);

    if (sc->sc_ifmedia.ifm_media & IFM_FDX) {
	if ((sc->sc_pdq->pdq_flags & PDQ_WANT_FDX) == 0) {
	    sc->sc_pdq->pdq_flags |= PDQ_WANT_FDX;
	    if (sc->sc_pdq->pdq_flags & PDQ_RUNNING)
		pdq_run(sc->sc_pdq);
	}
    } else if (sc->sc_pdq->pdq_flags & PDQ_WANT_FDX) {
	sc->sc_pdq->pdq_flags &= ~PDQ_WANT_FDX;
	if (sc->sc_pdq->pdq_flags & PDQ_RUNNING)
	    pdq_run(sc->sc_pdq);
    }

    return 0;
}

static void
pdq_ifmedia_status(
    struct ifnet *ifp,
    struct ifmediareq *ifmr)
{
    pdq_softc_t * const sc = PDQ_OS_IFP_TO_SOFTC(ifp);

    ifmr->ifm_status = IFM_AVALID;
    if (sc->sc_pdq->pdq_flags & PDQ_IS_ONRING)
	ifmr->ifm_status |= IFM_ACTIVE;

    ifmr->ifm_active = (ifmr->ifm_current & ~IFM_FDX);
    if (sc->sc_pdq->pdq_flags & PDQ_IS_FDX)
	ifmr->ifm_active |= IFM_FDX;
}

void
pdq_os_update_status(
    pdq_t *pdq,
    const void *arg)
{
    pdq_softc_t * const sc = pdq->pdq_os_ctx;
    const pdq_response_status_chars_get_t *rsp = arg;
    int media = 0;

    switch (rsp->status_chars_get.pmd_type[0]) {
	case PDQ_PMD_TYPE_ANSI_MUTLI_MODE:         media = IFM_FDDI_MMF; break;
	case PDQ_PMD_TYPE_ANSI_SINGLE_MODE_TYPE_1: media = IFM_FDDI_SMF; break;
	case PDQ_PMD_TYPE_ANSI_SIGNLE_MODE_TYPE_2: media = IFM_FDDI_SMF; break;
	case PDQ_PMD_TYPE_UNSHIELDED_TWISTED_PAIR: media = IFM_FDDI_UTP; break;
	default: media |= IFM_MANUAL;
    }

    if (rsp->status_chars_get.station_type == PDQ_STATION_TYPE_DAS)
	media |= IFM_FDDI_DA;

    sc->sc_ifmedia.ifm_media = media | IFM_FDDI;
}
#endif /* defined(IFM_FDDI) */

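/*
 * pdq_ifioctl:
 *	if_ioctl handler, run with the softc lock held.  Address and
 *	MTU requests go to fddi_ioctl(), flag changes re-run
 *	pdq_ifinit(), and multicast list changes restart the adapter
 *	via pdq_run() so the new list is picked up.  Media requests
 *	are passed to ifmedia_ioctl() when supported.
 */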
static int
pdq_ifioctl(
    struct ifnet *ifp,
    u_long cmd,
    caddr_t data)
{
    pdq_softc_t *sc = PDQ_OS_IFP_TO_SOFTC(ifp);
    int error = 0;

    PDQ_LOCK(sc);

    switch (cmd) {
	case SIOCSIFMTU:
	case SIOCGIFADDR:
	case SIOCSIFADDR: {
	    error = fddi_ioctl(ifp, cmd, data);
	    break;
	}

	case SIOCSIFFLAGS: {
	    pdq_ifinit(sc);
	    break;
	}

	case SIOCADDMULTI:
	case SIOCDELMULTI: {
	    if (sc->sc_if.if_flags & IFF_RUNNING) {
		pdq_run(sc->sc_pdq);
		error = 0;
	    }
	    break;
	}

#if defined(IFM_FDDI) && defined(SIOCSIFMEDIA)
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA: {
	    struct ifreq *ifr = (struct ifreq *)data;
	    error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, cmd);
	    break;
	}
#endif

	default: {
	    error = EINVAL;
	    break;
	}
    }

    PDQ_UNLOCK(sc);
    return error;
}

#ifndef IFF_NOTRAILERS
#define	IFF_NOTRAILERS	0
#endif

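/*
 * pdq_ifattach:
 *	Hook the driver into the network stack: initialize the softc
 *	mutex, fill in the ifnet entry points (init, ioctl, start,
 *	watchdog), set up the ifmedia instance when available, and
 *	attach as an FDDI interface.
 */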
void
pdq_ifattach(pdq_softc_t *sc)
{
    struct ifnet *ifp = &sc->sc_if;

    mtx_init(&sc->mtx, device_get_nameunit(sc->dev), MTX_DEF | MTX_RECURSE);

    ifp->if_softc = sc;
    ifp->if_init = (if_init_f_t *)pdq_ifinit;
    ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
    ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_NOTRAILERS|IFF_MULTICAST;

#if (defined(__FreeBSD__) && BSD >= 199506) || defined(__NetBSD__)
    ifp->if_watchdog = pdq_ifwatchdog;
#else
    ifp->if_watchdog = ifwatchdog;
#endif

    ifp->if_ioctl = pdq_ifioctl;
#if !defined(__NetBSD__) && !defined(__FreeBSD__)
    ifp->if_output = fddi_output;
#endif
    ifp->if_start = pdq_ifstart;

#if defined(IFM_FDDI)
    {
	const int media = sc->sc_ifmedia.ifm_media;
	ifmedia_init(&sc->sc_ifmedia, IFM_FDX,
		     pdq_ifmedia_change, pdq_ifmedia_status);
	ifmedia_add(&sc->sc_ifmedia, media, 0, 0);
	ifmedia_set(&sc->sc_ifmedia, media);
    }
#endif

    if_attach(ifp);
#if defined(__NetBSD__)
    fddi_ifattach(ifp, (caddr_t)&sc->sc_pdq->pdq_hwaddr);
#else
    fddi_ifattach(ifp, FDDI_BPF_SUPPORTED);
#endif
}

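/*
 * pdq_ifdetach:
 *	Undo pdq_ifattach(): detach the FDDI interface, stop the
 *	adapter, and release its bus resources via pdq_free().
 */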
void
pdq_ifdetach (pdq_softc_t *sc)
{
    struct ifnet *ifp;

    ifp = &sc->arpcom.ac_if;

    fddi_ifdetach(ifp, FDDI_BPF_SUPPORTED);
    pdq_stop(sc->sc_pdq);
    pdq_free(sc->dev);

    return;
}

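/*
 * pdq_free:
 *	Release any I/O port, memory and interrupt resources allocated
 *	by the bus front-end, and destroy the softc mutex if it was
 *	initialized.  Safe to call with partially set-up state.
 */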
void
pdq_free (device_t dev)
{
	pdq_softc_t *sc;

	sc = device_get_softc(dev);

	if (sc->io)
		bus_release_resource(dev, sc->io_type, sc->io_rid, sc->io);
	if (sc->mem)
		bus_release_resource(dev, sc->mem_type, sc->mem_rid, sc->mem);
	if (sc->irq_ih)
		bus_teardown_intr(dev, sc->irq, sc->irq_ih);
	if (sc->irq)
		bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);

	/*
	 * Destroy the mutex.
	 */
	if (mtx_initialized(&sc->mtx) != 0) {
		mtx_destroy(&sc->mtx);
	}

	return;
}

#if defined(PDQ_BUS_DMA)
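/*
 * pdq_os_memalloc_contig:
 *	Allocate and map the DMA-able control structures shared with
 *	the adapter: the descriptor block, the unsolicited event area,
 *	and the consumer block (which lives inside the descriptor block
 *	allocation).  Each stage records its progress in 'steps' so a
 *	failure unwinds exactly the work done so far through the switch
 *	below.  Returns zero on success.
 */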
int
pdq_os_memalloc_contig(
    pdq_t *pdq)
{
    pdq_softc_t * const sc = pdq->pdq_os_ctx;
    bus_dma_segment_t db_segs[1], ui_segs[1], cb_segs[1];
    int db_nsegs = 0, ui_nsegs = 0;
    int steps = 0;
    int not_ok;

    not_ok = bus_dmamem_alloc(sc->sc_dmatag,
			 sizeof(*pdq->pdq_dbp), sizeof(*pdq->pdq_dbp),
			 sizeof(*pdq->pdq_dbp), db_segs, 1, &db_nsegs,
			 BUS_DMA_NOWAIT);
    if (!not_ok) {
	steps = 1;
	not_ok = bus_dmamem_map(sc->sc_dmatag, db_segs, db_nsegs,
				sizeof(*pdq->pdq_dbp), (caddr_t *) &pdq->pdq_dbp,
				BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
	steps = 2;
	not_ok = bus_dmamap_create(sc->sc_dmatag, db_segs[0].ds_len, 1,
				   0x2000, 0, BUS_DMA_NOWAIT, &sc->sc_dbmap);
    }
    if (!not_ok) {
	steps = 3;
	not_ok = bus_dmamap_load(sc->sc_dmatag, sc->sc_dbmap,
				 pdq->pdq_dbp, sizeof(*pdq->pdq_dbp),
				 NULL, BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
	steps = 4;
	pdq->pdq_pa_descriptor_block = sc->sc_dbmap->dm_segs[0].ds_addr;
	not_ok = bus_dmamem_alloc(sc->sc_dmatag,
			 PDQ_OS_PAGESIZE, PDQ_OS_PAGESIZE, PDQ_OS_PAGESIZE,
			 ui_segs, 1, &ui_nsegs, BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
	steps = 5;
	not_ok = bus_dmamem_map(sc->sc_dmatag, ui_segs, ui_nsegs,
			    PDQ_OS_PAGESIZE,
			    (caddr_t *) &pdq->pdq_unsolicited_info.ui_events,
			    BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
	steps = 6;
	not_ok = bus_dmamap_create(sc->sc_dmatag, ui_segs[0].ds_len, 1,
				   PDQ_OS_PAGESIZE, 0, BUS_DMA_NOWAIT,
				   &sc->sc_uimap);
    }
    if (!not_ok) {
	steps = 7;
	not_ok = bus_dmamap_load(sc->sc_dmatag, sc->sc_uimap,
				 pdq->pdq_unsolicited_info.ui_events,
				 PDQ_OS_PAGESIZE, NULL, BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
	steps = 8;
	pdq->pdq_unsolicited_info.ui_pa_bufstart = sc->sc_uimap->dm_segs[0].ds_addr;
	cb_segs[0] = db_segs[0];
	cb_segs[0].ds_addr += offsetof(pdq_descriptor_block_t, pdqdb_consumer);
	cb_segs[0].ds_len = sizeof(pdq_consumer_block_t);
	not_ok = bus_dmamem_map(sc->sc_dmatag, cb_segs, 1,
				sizeof(*pdq->pdq_cbp), (caddr_t *) &pdq->pdq_cbp,
				BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
    }
    if (!not_ok) {
	steps = 9;
	not_ok = bus_dmamap_create(sc->sc_dmatag, cb_segs[0].ds_len, 1,
				   0x2000, 0, BUS_DMA_NOWAIT, &sc->sc_cbmap);
    }
    if (!not_ok) {
	steps = 10;
	not_ok = bus_dmamap_load(sc->sc_dmatag, sc->sc_cbmap,
				 (caddr_t) pdq->pdq_cbp, sizeof(*pdq->pdq_cbp),
				 NULL, BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
	pdq->pdq_pa_consumer_block = sc->sc_cbmap->dm_segs[0].ds_addr;
	return not_ok;
    }

    switch (steps) {
	case 11: {
	    bus_dmamap_unload(sc->sc_dmatag, sc->sc_cbmap);
	    /* FALL THROUGH */
	}
	case 10: {
	    bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cbmap);
	    /* FALL THROUGH */
	}
	case 9: {
	    bus_dmamem_unmap(sc->sc_dmatag,
			     (caddr_t) pdq->pdq_cbp, sizeof(*pdq->pdq_cbp));
	    /* FALL THROUGH */
	}
	case 8: {
	    bus_dmamap_unload(sc->sc_dmatag, sc->sc_uimap);
	    /* FALL THROUGH */
	}
	case 7: {
	    bus_dmamap_destroy(sc->sc_dmatag, sc->sc_uimap);
	    /* FALL THROUGH */
	}
	case 6: {
	    bus_dmamem_unmap(sc->sc_dmatag,
			     (caddr_t) pdq->pdq_unsolicited_info.ui_events,
			     PDQ_OS_PAGESIZE);
	    /* FALL THROUGH */
	}
	case 5: {
	    bus_dmamem_free(sc->sc_dmatag, ui_segs, ui_nsegs);
	    /* FALL THROUGH */
	}
	case 4: {
	    bus_dmamap_unload(sc->sc_dmatag, sc->sc_dbmap);
	    /* FALL THROUGH */
	}
	case 3: {
	    bus_dmamap_destroy(sc->sc_dmatag, sc->sc_dbmap);
	    /* FALL THROUGH */
	}
	case 2: {
	    bus_dmamem_unmap(sc->sc_dmatag,
			     (caddr_t) pdq->pdq_dbp,
			     sizeof(*pdq->pdq_dbp));
	    /* FALL THROUGH */
	}
	case 1: {
	    bus_dmamem_free(sc->sc_dmatag, db_segs, db_nsegs);
	    /* FALL THROUGH */
	}
    }

    return not_ok;
}

extern void
pdq_os_descriptor_block_sync(
    pdq_os_ctx_t *sc,
    size_t offset,
    size_t length,
    int ops)
{
    bus_dmamap_sync(sc->sc_dmatag, sc->sc_dbmap, offset, length, ops);
}

extern void
pdq_os_consumer_block_sync(
    pdq_os_ctx_t *sc,
    int ops)
{
    bus_dmamap_sync(sc->sc_dmatag, sc->sc_cbmap, 0, sizeof(pdq_consumer_block_t), ops);
}

extern void
pdq_os_unsolicited_event_sync(
    pdq_os_ctx_t *sc,
    size_t offset,
    size_t length,
    int ops)
{
    bus_dmamap_sync(sc->sc_dmatag, sc->sc_uimap, offset, length, ops);
}

extern void
pdq_os_databuf_sync(
    pdq_os_ctx_t *sc,
    struct mbuf *m,
    size_t offset,
    size_t length,
    int ops)
{
    bus_dmamap_sync(sc->sc_dmatag, M_GETCTX(m, bus_dmamap_t), offset, length, ops);
}

extern void
pdq_os_databuf_free(
    pdq_os_ctx_t *sc,
    struct mbuf *m)
{
    if (m->m_flags & (M_HASRXDMAMAP|M_HASTXDMAMAP)) {
	bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
	bus_dmamap_unload(sc->sc_dmatag, map);
	bus_dmamap_destroy(sc->sc_dmatag, map);
	m->m_flags &= ~(M_HASRXDMAMAP|M_HASTXDMAMAP);
    }
    m_freem(m);
}

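/*
 * pdq_os_databuf_alloc:
 *	Allocate a cluster mbuf for the receive ring, create a DMA map
 *	for it, and load the map for reading.  On any failure the mbuf
 *	(and map) are released and NULL is returned.
 */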
extern struct mbuf *
pdq_os_databuf_alloc(
    pdq_os_ctx_t *sc)
{
    struct mbuf *m;
    bus_dmamap_t map;

    MGETHDR(m, M_DONTWAIT, MT_DATA);
    if (m == NULL) {
	printf("%s: can't alloc small buf\n", sc->sc_dev.dv_xname);
	return NULL;
    }
    MCLGET(m, M_DONTWAIT);
    if ((m->m_flags & M_EXT) == 0) {
	printf("%s: can't alloc cluster\n", sc->sc_dev.dv_xname);
	m_free(m);
	return NULL;
    }
    m->m_pkthdr.len = m->m_len = PDQ_OS_DATABUF_SIZE;

    if (bus_dmamap_create(sc->sc_dmatag, PDQ_OS_DATABUF_SIZE,
			   1, PDQ_OS_DATABUF_SIZE, 0, BUS_DMA_NOWAIT, &map)) {
	printf("%s: can't create dmamap\n", sc->sc_dev.dv_xname);
	m_free(m);
	return NULL;
    }
    if (bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
			     BUS_DMA_READ|BUS_DMA_NOWAIT)) {
	printf("%s: can't load dmamap\n", sc->sc_dev.dv_xname);
	bus_dmamap_destroy(sc->sc_dmatag, map);
	m_free(m);
	return NULL;
    }
    m->m_flags |= M_HASRXDMAMAP;
    M_SETCTX(m, map);
    return m;
}
#endif