/*	$NetBSD: pdq_ifsubr.c,v 1.38 2001/12/21 23:21:47 matt Exp $	*/

/*-
 * Copyright (c) 1995, 1996 Matt Thomas <matt@3am-software.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pdq_ifsubr.c,v 1.12 1997/06/05 01:56:35 thomas Exp$
 * $FreeBSD: head/sys/dev/pdq/pdq_ifsubr.c 112309 2003-03-16 00:24:18Z mdodd $
 */

/*
 * DEC PDQ FDDI Controller; code for BSD-derived operating systems
 *
 *	This module provides bus-independent, BSD-specific O/S functions
 *	(i.e. it provides an ifnet interface to the rest of the system).
 */

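/*
 * Illustrative sketch, not part of the driver: roughly how a bus front
 * end (e.g. if_fpa.c for PCI DEFPA boards or if_fea.c for EISA DEFEA
 * boards) is expected to glue into this module.  The attach routine
 * name "xxx_attach" is hypothetical, and the argument list of the
 * chip-level pdq_initialize() is deliberately left as a comment since
 * its exact signature lives in the chip layer, not in this file.
 */
#if 0	/* example only */
static int
xxx_attach(device_t dev)
{
    pdq_softc_t *sc = device_get_softc(dev);

    sc->dev = dev;
    /* ... allocate sc->io / sc->mem / sc->irq bus resources here ... */

    /* The chip layer builds the pdq_t that this module operates on. */
    sc->sc_pdq = pdq_initialize(/* bus tag, handle, ifp, unit, sc, type */);
    if (sc->sc_pdq == NULL) {
	pdq_free(dev);
	return (ENXIO);
    }

    /* Wire the chip into the network stack (ifnet, ifmedia, mutex). */
    pdq_ifattach(sc);
    return (0);
}
#endif	/* example only */
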
#ifdef __NetBSD__
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pdq_ifsubr.c,v 1.38 2001/12/21 23:21:47 matt Exp $");
#endif

#define PDQ_OSSUPPORT

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <sys/module.h>
#include <sys/bus.h>

#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/fddi.h>

#include <net/bpf.h>

#include <dev/pdq/pdq_freebsd.h>
#include <dev/pdq/pdqreg.h>

devclass_t pdq_devclass;

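/*
 * pdq_ifinit() doubles as the ifnet if_init entry point (see
 * pdq_ifattach() below) and as the SIOCSIFFLAGS handler in
 * pdq_ifioctl(): it maps IFF_PROMISC and IFF_LINK1 onto the chip's
 * PDQ_PROMISC and PDQ_PASS_SMT flags, then starts or stops the
 * adapter according to IFF_UP.
 */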
static void
pdq_ifinit(
    pdq_softc_t *sc)
{
    if (sc->sc_if.if_flags & IFF_UP) {
	sc->sc_if.if_flags |= IFF_RUNNING;
	if (sc->sc_if.if_flags & IFF_PROMISC) {
	    sc->sc_pdq->pdq_flags |= PDQ_PROMISC;
	} else {
	    sc->sc_pdq->pdq_flags &= ~PDQ_PROMISC;
	}
	if (sc->sc_if.if_flags & IFF_LINK1) {
	    sc->sc_pdq->pdq_flags |= PDQ_PASS_SMT;
	} else {
	    sc->sc_pdq->pdq_flags &= ~PDQ_PASS_SMT;
	}
	sc->sc_pdq->pdq_flags |= PDQ_RUNNING;
	pdq_run(sc->sc_pdq);
    } else {
	sc->sc_if.if_flags &= ~IFF_RUNNING;
	sc->sc_pdq->pdq_flags &= ~PDQ_RUNNING;
	pdq_stop(sc->sc_pdq);
    }
}

static void
pdq_ifwatchdog(
    struct ifnet *ifp)
{
    /*
     * No progress was made on the transmit queue for PDQ_OS_TX_TIMEOUT
     * seconds.  Remove all queued packets.
     */

    ifp->if_flags &= ~IFF_OACTIVE;
    ifp->if_timer = 0;
    for (;;) {
	struct mbuf *m;
	IFQ_DEQUEUE(&ifp->if_snd, m);
	if (m == NULL)
	    return;
	PDQ_OS_DATABUF_FREE(PDQ_OS_IFP_TO_SOFTC(ifp)->sc_pdq, m);
    }
}
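
/*
 * The watchdog is driven by the generic ifnet timer: pdq_ifstart()
 * arms ifp->if_timer with PDQ_OS_TX_TIMEOUT whenever it queues work,
 * the stack decrements it once per second, and pdq_ifwatchdog() runs
 * when it reaches zero, i.e. when no transmit completed within that
 * window.
 */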

static void
pdq_ifstart(
    struct ifnet *ifp)
{
    pdq_softc_t * const sc = PDQ_OS_IFP_TO_SOFTC(ifp);
    struct mbuf *m;
    int tx = 0;

    if ((ifp->if_flags & IFF_RUNNING) == 0)
	return;

    if (sc->sc_if.if_timer == 0)
	sc->sc_if.if_timer = PDQ_OS_TX_TIMEOUT;

    if ((sc->sc_pdq->pdq_flags & PDQ_TXOK) == 0) {
	sc->sc_if.if_flags |= IFF_OACTIVE;
	return;
    }
    sc->sc_flags |= PDQIF_DOWNCALL;
    for (;; tx = 1) {
	IF_DEQUEUE(&ifp->if_snd, m);
	if (m == NULL)
	    break;
#if defined(PDQ_BUS_DMA) && !defined(PDQ_BUS_DMA_NOTX)
	if ((m->m_flags & M_HASTXDMAMAP) == 0) {
	    bus_dmamap_t map;
	    if (PDQ_OS_HDR_OFFSET != PDQ_RX_FC_OFFSET) {
		m->m_data[0] = PDQ_FDDI_PH0;
		m->m_data[1] = PDQ_FDDI_PH1;
		m->m_data[2] = PDQ_FDDI_PH2;
	    }
	    if (!bus_dmamap_create(sc->sc_dmatag, m->m_pkthdr.len, 255,
				   m->m_pkthdr.len, 0, BUS_DMA_NOWAIT, &map)) {
		if (!bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
					  BUS_DMA_WRITE|BUS_DMA_NOWAIT)) {
		    bus_dmamap_sync(sc->sc_dmatag, map, 0, m->m_pkthdr.len,
				    BUS_DMASYNC_PREWRITE);
		    M_SETCTX(m, map);
		    m->m_flags |= M_HASTXDMAMAP;
		}
	    }
	    if ((m->m_flags & M_HASTXDMAMAP) == 0)
		break;
	}
#else
	if (PDQ_OS_HDR_OFFSET != PDQ_RX_FC_OFFSET) {
	    m->m_data[0] = PDQ_FDDI_PH0;
	    m->m_data[1] = PDQ_FDDI_PH1;
	    m->m_data[2] = PDQ_FDDI_PH2;
	}
#endif

	if (pdq_queue_transmit_data(sc->sc_pdq, m) == PDQ_FALSE)
	    break;
    }
    if (m != NULL) {
	ifp->if_flags |= IFF_OACTIVE;
	IF_PREPEND(&ifp->if_snd, m);
    }
    if (tx)
	PDQ_DO_TYPE2_PRODUCER(sc->sc_pdq);
    sc->sc_flags &= ~PDQIF_DOWNCALL;
}
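
/*
 * Two conventions in pdq_ifstart() are worth noting: a packet the
 * adapter cannot accept is pushed back with IF_PREPEND() and
 * IFF_OACTIVE is set, so the stack stops calling the start routine
 * until pdq_os_restart_transmitter() clears it; and the type 2
 * producer index is written to the device only once per call (the
 * "tx" flag) rather than once per packet.  PDQIF_DOWNCALL keeps
 * pdq_os_restart_transmitter() from re-entering this routine while
 * it is still running.
 */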

void
pdq_os_receive_pdu(
    pdq_t *pdq,
    struct mbuf *m,
    size_t pktlen,
    int drop)
{
    pdq_softc_t *sc = pdq->pdq_os_ctx;
    struct ifnet *ifp = &sc->sc_if;
    struct fddi_header *fh;

    ifp->if_ipackets++;
#if defined(PDQ_BUS_DMA)
    {
	/*
	 * Even though the first mbuf starts at the first fddi header octet,
	 * the dmamap starts PDQ_OS_HDR_OFFSET octets earlier.  Any additional
	 * mbufs will start normally.
	 */
	int offset = PDQ_OS_HDR_OFFSET;
	struct mbuf *m0;
	for (m0 = m; m0 != NULL; m0 = m0->m_next, offset = 0) {
	    pdq_os_databuf_sync(sc, m0, offset, m0->m_len, BUS_DMASYNC_POSTREAD);
	    bus_dmamap_unload(sc->sc_dmatag, M_GETCTX(m0, bus_dmamap_t));
	    bus_dmamap_destroy(sc->sc_dmatag, M_GETCTX(m0, bus_dmamap_t));
	    m0->m_flags &= ~M_HASRXDMAMAP;
	    M_SETCTX(m0, NULL);
	}
    }
#endif
    m->m_pkthdr.len = pktlen;
#if NBPFILTER > 0 && defined(__NetBSD__)
    if (sc->sc_bpf != NULL)
	PDQ_BPF_MTAP(sc, m);
#endif
    fh = mtod(m, struct fddi_header *);
    if (drop || (fh->fddi_fc & (FDDIFC_L|FDDIFC_F)) != FDDIFC_LLC_ASYNC) {
	ifp->if_iqdrops++;
	ifp->if_ierrors++;
	PDQ_OS_DATABUF_FREE(pdq, m);
	return;
    }

    m->m_pkthdr.rcvif = ifp;
    (*ifp->if_input)(ifp, m);
}
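
/*
 * The frame-control test above passes only asynchronous LLC frames
 * (fddi_fc class/format bits equal to FDDIFC_LLC_ASYNC) up to the
 * protocol input routine; SMT, MAC and other frame types are counted
 * as input drops/errors and freed here.
 */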

void
pdq_os_restart_transmitter(
    pdq_t *pdq)
{
    pdq_softc_t *sc = pdq->pdq_os_ctx;
    sc->sc_if.if_flags &= ~IFF_OACTIVE;
    if (IFQ_IS_EMPTY(&sc->sc_if.if_snd) == 0) {
	sc->sc_if.if_timer = PDQ_OS_TX_TIMEOUT;
	if ((sc->sc_flags & PDQIF_DOWNCALL) == 0)
	    pdq_ifstart(&sc->sc_if);
    } else {
	sc->sc_if.if_timer = 0;
    }
}

void
pdq_os_transmit_done(
    pdq_t *pdq,
    struct mbuf *m)
{
    pdq_softc_t *sc = pdq->pdq_os_ctx;
#if NBPFILTER > 0
    if (sc->sc_bpf != NULL)
	PDQ_BPF_MTAP(sc, m);
#endif
    PDQ_OS_DATABUF_FREE(pdq, m);
    sc->sc_if.if_opackets++;
}

void
pdq_os_addr_fill(
    pdq_t *pdq,
    pdq_lanaddr_t *addr,
    size_t num_addrs)
{
    pdq_softc_t *sc = pdq->pdq_os_ctx;
    struct ifnet *ifp;
    struct ifmultiaddr *ifma;

    ifp = &sc->arpcom.ac_if;

    /*
     * ADDR_FILTER_SET is always issued before FILTER_SET so
     * we can play with PDQ_ALLMULTI and not worry about
     * queueing a FILTER_SET ourselves.
     */

    pdq->pdq_flags &= ~PDQ_ALLMULTI;
#if defined(IFF_ALLMULTI)
    sc->sc_if.if_flags &= ~IFF_ALLMULTI;
#endif

    for (ifma = TAILQ_FIRST(&sc->sc_if.if_multiaddrs); ifma && num_addrs > 0;
	 ifma = TAILQ_NEXT(ifma, ifma_link)) {
	    char *mcaddr;
	    if (ifma->ifma_addr->sa_family != AF_LINK)
		    continue;
	    mcaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
	    ((u_short *) addr->lanaddr_bytes)[0] = ((u_short *) mcaddr)[0];
	    ((u_short *) addr->lanaddr_bytes)[1] = ((u_short *) mcaddr)[1];
	    ((u_short *) addr->lanaddr_bytes)[2] = ((u_short *) mcaddr)[2];
	    addr++;
	    num_addrs--;
    }
    /*
     * If not all the addresses fit into the CAM, turn on all-multicast mode.
     */
    if (ifma != NULL) {
	pdq->pdq_flags |= PDQ_ALLMULTI;
#if defined(IFF_ALLMULTI)
	sc->sc_if.if_flags |= IFF_ALLMULTI;
#endif
    }
}

#if defined(IFM_FDDI)
static int
pdq_ifmedia_change(
    struct ifnet *ifp)
{
    pdq_softc_t * const sc = PDQ_OS_IFP_TO_SOFTC(ifp);

    if (sc->sc_ifmedia.ifm_media & IFM_FDX) {
	if ((sc->sc_pdq->pdq_flags & PDQ_WANT_FDX) == 0) {
	    sc->sc_pdq->pdq_flags |= PDQ_WANT_FDX;
	    if (sc->sc_pdq->pdq_flags & PDQ_RUNNING)
		pdq_run(sc->sc_pdq);
	}
    } else if (sc->sc_pdq->pdq_flags & PDQ_WANT_FDX) {
	sc->sc_pdq->pdq_flags &= ~PDQ_WANT_FDX;
	if (sc->sc_pdq->pdq_flags & PDQ_RUNNING)
	    pdq_run(sc->sc_pdq);
    }

    return 0;
}

static void
pdq_ifmedia_status(
    struct ifnet *ifp,
    struct ifmediareq *ifmr)
{
    pdq_softc_t * const sc = PDQ_OS_IFP_TO_SOFTC(ifp);

    ifmr->ifm_status = IFM_AVALID;
    if (sc->sc_pdq->pdq_flags & PDQ_IS_ONRING)
	ifmr->ifm_status |= IFM_ACTIVE;

    ifmr->ifm_active = (ifmr->ifm_current & ~IFM_FDX);
    if (sc->sc_pdq->pdq_flags & PDQ_IS_FDX)
	ifmr->ifm_active |= IFM_FDX;
}

void
pdq_os_update_status(
    pdq_t *pdq,
    const void *arg)
{
    pdq_softc_t * const sc = pdq->pdq_os_ctx;
    const pdq_response_status_chars_get_t *rsp = arg;
    int media = 0;

    switch (rsp->status_chars_get.pmd_type[0]) {
	case PDQ_PMD_TYPE_ANSI_MUTLI_MODE:         media = IFM_FDDI_MMF; break;
	case PDQ_PMD_TYPE_ANSI_SINGLE_MODE_TYPE_1: media = IFM_FDDI_SMF; break;
	case PDQ_PMD_TYPE_ANSI_SIGNLE_MODE_TYPE_2: media = IFM_FDDI_SMF; break;
	case PDQ_PMD_TYPE_UNSHIELDED_TWISTED_PAIR: media = IFM_FDDI_UTP; break;
	default: media |= IFM_MANUAL;
    }

    if (rsp->status_chars_get.station_type == PDQ_STATION_TYPE_DAS)
	media |= IFM_FDDI_DA;

    sc->sc_ifmedia.ifm_media = media | IFM_FDDI;
}
#endif /* defined(IFM_FDDI) */

static int
pdq_ifioctl(
    struct ifnet *ifp,
    u_long cmd,
    caddr_t data)
{
    pdq_softc_t *sc = PDQ_OS_IFP_TO_SOFTC(ifp);
    int error = 0;

    PDQ_LOCK(sc);

    switch (cmd) {
	case SIOCSIFMTU:
	case SIOCGIFADDR:
	case SIOCSIFADDR: {
	    error = fddi_ioctl(ifp, cmd, data);
	    break;
	}

	case SIOCSIFFLAGS: {
	    pdq_ifinit(sc);
	    break;
	}

	case SIOCADDMULTI:
	case SIOCDELMULTI: {
	    if (sc->sc_if.if_flags & IFF_RUNNING) {
		pdq_run(sc->sc_pdq);
		error = 0;
	    }
	    break;
	}

#if defined(IFM_FDDI) && defined(SIOCSIFMEDIA)
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA: {
	    struct ifreq *ifr = (struct ifreq *)data;
	    error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, cmd);
	    break;
	}
#endif

	default: {
	    error = EINVAL;
	    break;
	}
    }

    PDQ_UNLOCK(sc);
    return error;
}
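
/*
 * Illustrative sketch, not part of the driver: how the SIOCGIFMEDIA
 * case above is typically reached from user space (this is essentially
 * what ifconfig(8) does).  The interface name "fpa0" is just an
 * example.
 */
#if 0	/* example only */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_media.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
    struct ifmediareq ifmr;
    int s = socket(AF_INET, SOCK_DGRAM, 0);

    memset(&ifmr, 0, sizeof(ifmr));
    strncpy(ifmr.ifm_name, "fpa0", sizeof(ifmr.ifm_name));
    if (s >= 0 && ioctl(s, SIOCGIFMEDIA, &ifmr) == 0)
	printf("active media word: 0x%x\n", ifmr.ifm_active);
    return (0);
}
#endif	/* example only */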

#ifndef IFF_NOTRAILERS
#define	IFF_NOTRAILERS	0
#endif

void
pdq_ifattach(pdq_softc_t *sc)
{
    struct ifnet *ifp = &sc->sc_if;

    mtx_init(&sc->mtx, device_get_nameunit(sc->dev), MTX_NETWORK_LOCK,
	MTX_DEF | MTX_RECURSE);

    ifp->if_softc = sc;
    ifp->if_init = (if_init_f_t *)pdq_ifinit;
    ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
    ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_NOTRAILERS|IFF_MULTICAST;

#if (defined(__FreeBSD__) && BSD >= 199506) || defined(__NetBSD__)
    ifp->if_watchdog = pdq_ifwatchdog;
#else
    ifp->if_watchdog = ifwatchdog;
#endif

    ifp->if_ioctl = pdq_ifioctl;
#if !defined(__NetBSD__) && !defined(__FreeBSD__)
    ifp->if_output = fddi_output;
#endif
    ifp->if_start = pdq_ifstart;

#if defined(IFM_FDDI)
    {
	const int media = sc->sc_ifmedia.ifm_media;
	ifmedia_init(&sc->sc_ifmedia, IFM_FDX,
		     pdq_ifmedia_change, pdq_ifmedia_status);
	ifmedia_add(&sc->sc_ifmedia, media, 0, 0);
	ifmedia_set(&sc->sc_ifmedia, media);
    }
#endif

#if defined(__NetBSD__)
    if_attach(ifp);
    fddi_ifattach(ifp, (caddr_t)&sc->sc_pdq->pdq_hwaddr);
#else
    fddi_ifattach(ifp, FDDI_BPF_SUPPORTED);
#endif
}
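
/*
 * Note that the ifmedia defaults above are taken from
 * sc->sc_ifmedia.ifm_media, which pdq_os_update_status() is expected
 * to have filled in during chip-level initialization, before
 * pdq_ifattach() is called.
 */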

void
pdq_ifdetach (pdq_softc_t *sc)
{
    struct ifnet *ifp;

    ifp = &sc->arpcom.ac_if;

    fddi_ifdetach(ifp, FDDI_BPF_SUPPORTED);
    pdq_stop(sc->sc_pdq);
    pdq_free(sc->dev);

    return;
}

void
pdq_free (device_t dev)
{
	pdq_softc_t *sc;

	sc = device_get_softc(dev);

	if (sc->io)
		bus_release_resource(dev, sc->io_type, sc->io_rid, sc->io);
	if (sc->mem)
		bus_release_resource(dev, sc->mem_type, sc->mem_rid, sc->mem);
	if (sc->irq_ih)
		bus_teardown_intr(dev, sc->irq, sc->irq_ih);
	if (sc->irq)
		bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);

	/*
	 * Destroy the mutex.
	 */
	if (mtx_initialized(&sc->mtx) != 0) {
		mtx_destroy(&sc->mtx);
	}

	return;
}

#if defined(PDQ_BUS_DMA)
int
pdq_os_memalloc_contig(
    pdq_t *pdq)
{
    pdq_softc_t * const sc = pdq->pdq_os_ctx;
    bus_dma_segment_t db_segs[1], ui_segs[1], cb_segs[1];
    int db_nsegs = 0, ui_nsegs = 0;
    int steps = 0;
    int not_ok;

    not_ok = bus_dmamem_alloc(sc->sc_dmatag,
			 sizeof(*pdq->pdq_dbp), sizeof(*pdq->pdq_dbp),
			 sizeof(*pdq->pdq_dbp), db_segs, 1, &db_nsegs,
			 BUS_DMA_NOWAIT);
    if (!not_ok) {
	steps = 1;
	not_ok = bus_dmamem_map(sc->sc_dmatag, db_segs, db_nsegs,
				sizeof(*pdq->pdq_dbp), (caddr_t *) &pdq->pdq_dbp,
				BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
	steps = 2;
	not_ok = bus_dmamap_create(sc->sc_dmatag, db_segs[0].ds_len, 1,
				   0x2000, 0, BUS_DMA_NOWAIT, &sc->sc_dbmap);
    }
    if (!not_ok) {
	steps = 3;
	not_ok = bus_dmamap_load(sc->sc_dmatag, sc->sc_dbmap,
				 pdq->pdq_dbp, sizeof(*pdq->pdq_dbp),
				 NULL, BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
	steps = 4;
	pdq->pdq_pa_descriptor_block = sc->sc_dbmap->dm_segs[0].ds_addr;
	not_ok = bus_dmamem_alloc(sc->sc_dmatag,
			 PDQ_OS_PAGESIZE, PDQ_OS_PAGESIZE, PDQ_OS_PAGESIZE,
			 ui_segs, 1, &ui_nsegs, BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
	steps = 5;
	not_ok = bus_dmamem_map(sc->sc_dmatag, ui_segs, ui_nsegs,
			    PDQ_OS_PAGESIZE,
			    (caddr_t *) &pdq->pdq_unsolicited_info.ui_events,
			    BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
	steps = 6;
	not_ok = bus_dmamap_create(sc->sc_dmatag, ui_segs[0].ds_len, 1,
				   PDQ_OS_PAGESIZE, 0, BUS_DMA_NOWAIT,
				   &sc->sc_uimap);
    }
    if (!not_ok) {
	steps = 7;
	not_ok = bus_dmamap_load(sc->sc_dmatag, sc->sc_uimap,
				 pdq->pdq_unsolicited_info.ui_events,
				 PDQ_OS_PAGESIZE, NULL, BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
	steps = 8;
	pdq->pdq_unsolicited_info.ui_pa_bufstart = sc->sc_uimap->dm_segs[0].ds_addr;
	cb_segs[0] = db_segs[0];
	cb_segs[0].ds_addr += offsetof(pdq_descriptor_block_t, pdqdb_consumer);
	cb_segs[0].ds_len = sizeof(pdq_consumer_block_t);
	not_ok = bus_dmamem_map(sc->sc_dmatag, cb_segs, 1,
				sizeof(*pdq->pdq_cbp), (caddr_t *) &pdq->pdq_cbp,
				BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
    }
    if (!not_ok) {
	steps = 9;
	not_ok = bus_dmamap_create(sc->sc_dmatag, cb_segs[0].ds_len, 1,
				   0x2000, 0, BUS_DMA_NOWAIT, &sc->sc_cbmap);
    }
    if (!not_ok) {
	steps = 10;
	not_ok = bus_dmamap_load(sc->sc_dmatag, sc->sc_cbmap,
				 (caddr_t) pdq->pdq_cbp, sizeof(*pdq->pdq_cbp),
				 NULL, BUS_DMA_NOWAIT);
    }
    if (!not_ok) {
	pdq->pdq_pa_consumer_block = sc->sc_cbmap->dm_segs[0].ds_addr;
	return not_ok;
    }

    switch (steps) {
	case 11: {
	    bus_dmamap_unload(sc->sc_dmatag, sc->sc_cbmap);
	    /* FALL THROUGH */
	}
	case 10: {
	    bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cbmap);
	    /* FALL THROUGH */
	}
	case 9: {
	    bus_dmamem_unmap(sc->sc_dmatag,
			     (caddr_t) pdq->pdq_cbp, sizeof(*pdq->pdq_cbp));
	    /* FALL THROUGH */
	}
	case 8: {
	    bus_dmamap_unload(sc->sc_dmatag, sc->sc_uimap);
	    /* FALL THROUGH */
	}
	case 7: {
	    bus_dmamap_destroy(sc->sc_dmatag, sc->sc_uimap);
	    /* FALL THROUGH */
	}
	case 6: {
	    bus_dmamem_unmap(sc->sc_dmatag,
			     (caddr_t) pdq->pdq_unsolicited_info.ui_events,
			     PDQ_OS_PAGESIZE);
	    /* FALL THROUGH */
	}
	case 5: {
	    bus_dmamem_free(sc->sc_dmatag, ui_segs, ui_nsegs);
	    /* FALL THROUGH */
	}
	case 4: {
	    bus_dmamap_unload(sc->sc_dmatag, sc->sc_dbmap);
	    /* FALL THROUGH */
	}
	case 3: {
	    bus_dmamap_destroy(sc->sc_dmatag, sc->sc_dbmap);
	    /* FALL THROUGH */
	}
	case 2: {
	    bus_dmamem_unmap(sc->sc_dmatag,
			     (caddr_t) pdq->pdq_dbp,
			     sizeof(*pdq->pdq_dbp));
	    /* FALL THROUGH */
	}
	case 1: {
	    bus_dmamem_free(sc->sc_dmatag, db_segs, db_nsegs);
	    /* FALL THROUGH */
	}
    }

    return not_ok;
}
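
/*
 * The "steps" counter above records how far allocation got so that the
 * switch can unwind exactly the resources already created; each case
 * intentionally falls through to the next.  Note that "steps" never
 * actually reaches 11 (the function returns as soon as the final
 * bus_dmamap_load() succeeds), so the case 11 unload of sc_cbmap is
 * kept only for symmetry.
 */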

extern void
pdq_os_descriptor_block_sync(
    pdq_os_ctx_t *sc,
    size_t offset,
    size_t length,
    int ops)
{
    bus_dmamap_sync(sc->sc_dmatag, sc->sc_dbmap, offset, length, ops);
}

extern void
pdq_os_consumer_block_sync(
    pdq_os_ctx_t *sc,
    int ops)
{
    bus_dmamap_sync(sc->sc_dmatag, sc->sc_cbmap, 0, sizeof(pdq_consumer_block_t), ops);
}

extern void
pdq_os_unsolicited_event_sync(
    pdq_os_ctx_t *sc,
    size_t offset,
    size_t length,
    int ops)
{
    bus_dmamap_sync(sc->sc_dmatag, sc->sc_uimap, offset, length, ops);
}

extern void
pdq_os_databuf_sync(
    pdq_os_ctx_t *sc,
    struct mbuf *m,
    size_t offset,
    size_t length,
    int ops)
{
    bus_dmamap_sync(sc->sc_dmatag, M_GETCTX(m, bus_dmamap_t), offset, length, ops);
}

extern void
pdq_os_databuf_free(
    pdq_os_ctx_t *sc,
    struct mbuf *m)
{
    if (m->m_flags & (M_HASRXDMAMAP|M_HASTXDMAMAP)) {
	bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
	bus_dmamap_unload(sc->sc_dmatag, map);
	bus_dmamap_destroy(sc->sc_dmatag, map);
	m->m_flags &= ~(M_HASRXDMAMAP|M_HASTXDMAMAP);
    }
    m_freem(m);
}

extern struct mbuf *
pdq_os_databuf_alloc(
    pdq_os_ctx_t *sc)
{
    struct mbuf *m;
    bus_dmamap_t map;

    MGETHDR(m, M_DONTWAIT, MT_DATA);
    if (m == NULL) {
	printf("%s: can't alloc small buf\n", sc->sc_dev.dv_xname);
	return NULL;
    }
    MCLGET(m, M_DONTWAIT);
    if ((m->m_flags & M_EXT) == 0) {
	printf("%s: can't alloc cluster\n", sc->sc_dev.dv_xname);
	m_free(m);
	return NULL;
    }
    m->m_pkthdr.len = m->m_len = PDQ_OS_DATABUF_SIZE;

    if (bus_dmamap_create(sc->sc_dmatag, PDQ_OS_DATABUF_SIZE,
			  1, PDQ_OS_DATABUF_SIZE, 0, BUS_DMA_NOWAIT, &map)) {
	printf("%s: can't create dmamap\n", sc->sc_dev.dv_xname);
	m_free(m);
	return NULL;
    }
    if (bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
			     BUS_DMA_READ|BUS_DMA_NOWAIT)) {
	printf("%s: can't load dmamap\n", sc->sc_dev.dv_xname);
	bus_dmamap_destroy(sc->sc_dmatag, map);
	m_free(m);
	return NULL;
    }
    m->m_flags |= M_HASRXDMAMAP;
    M_SETCTX(m, map);
    return m;
}
#endif
