/*-
 * Copyright (c) 1997 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/memdesc.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>

#define	MAX_BPAGES	1024

struct bus_dma_tag {
	bus_dma_tag_t	parent;
	bus_size_t	alignment;
	bus_addr_t	boundary;
	bus_addr_t	lowaddr;
	bus_addr_t	highaddr;
	bus_dma_filter_t *filter;
	void		*filterarg;
	bus_size_t	maxsize;
	u_int		nsegments;
	bus_size_t	maxsegsz;
	int		flags;
	int		ref_count;
	int		map_count;
	bus_dma_lock_t	*lockfunc;
	void		*lockfuncarg;
	bus_dma_segment_t *segments;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_addr_t	dataaddr;	/* client physical address */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

u_int busdma_swi_pending;

static struct mtx bounce_lock;
static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static int total_bounced;
static int total_deferred;

static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, free_bpages, CTLFLAG_RD, &free_bpages, 0,
    "Free bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, reserved_bpages, CTLFLAG_RD, &reserved_bpages,
    0, "Reserved bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, active_bpages, CTLFLAG_RD, &active_bpages, 0,
    "Active bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
    "Total bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bounced, CTLFLAG_RD, &total_bounced, 0,
    "Total bounce requests");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_deferred, CTLFLAG_RD, &total_deferred,
    0, "Total bounce requests that were deferred");
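
/*
 * The counters above are exported under the hw.busdma sysctl tree and can
 * be inspected from userland, e.g. with "sysctl hw.busdma" (the values
 * shown below are illustrative only):
 *
 *	hw.busdma.total_bpages: 32
 *	hw.busdma.free_bpages: 32
 */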

struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	struct memdesc	mem;
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_offset_t vaddr, bus_addr_t addr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr,
    bus_size_t len);

/*
 * Return true if a match is made.
 *
 * To find a match, walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag, then call the filter
 * callback to check for a match; if there is no filter callback, then
 * assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr, bus_size_t len)
{
	bus_addr_t bndy;
	int retval;

	retval = 0;
	bndy = dmat->boundary;
	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) ||
		    (paddr & (dmat->alignment - 1)) != 0 ||
		    (paddr & bndy) != ((paddr + len) & bndy)) &&
		    (dmat->filter == NULL ||
		    (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;
		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}
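
/*
 * A worked example (illustrative values, not taken from any driver): for
 * a tag with lowaddr = 0xffffff (a 16MB ISA-style limit), highaddr =
 * BUS_SPACE_MAXADDR, alignment = 1, boundary = 0, and no filter, a page
 * at paddr 0x1000000 falls inside the exclusion window (paddr > lowaddr
 * and paddr <= highaddr), so run_filter() returns 1 and the page must be
 * bounced, while a page at paddr 0x800000 returns 0 and is used directly.
 */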

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant as dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	panic("driver error: busdma dflt_lock called");
}

#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4

/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Basic sanity checking */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page(lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page(highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	newtag->segments = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
			    newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (newtag->lowaddr < paddr_max && (flags & BUS_DMA_ALLOCNOW) != 0) {
		/* Must bounce */

		if (ptoa(total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}
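
/*
 * Typical usage, as a sketch only ("sc" is a hypothetical driver softc
 * and the constraint values are illustrative, not taken from any driver):
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    MAXBSIZE, 1, MAXBSIZE, 0, busdma_lock_mutex, &sc->sc_mtx,
 *	    &sc->sc_dmat);
 */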

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL)
					free(dmat->segments, M_DEVBUF);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL)
			return (ENOMEM);
	}

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->lowaddr < paddr_max) {
		/* Must bounce */
		int maxpages;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (*mapp == NULL)
			return (ENOMEM);

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, atop(paddr_max - dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0 && total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - total_bpages, pages);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	if (map != NULL && map != &nobounce_dmamap) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    mflags);
		if (dmat->segments == NULL)
			return (ENOMEM);
	}
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	/*
	 * XXX:
	 * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact
	 * alignment guarantees of malloc need to be nailed down, and the
	 * code below should be rewritten to take that into account.
	 *
	 * In the meantime, we'll warn the user if malloc gets it wrong.
	 */
	if ((dmat->maxsize <= PAGE_SIZE) &&
	   (dmat->alignment < dmat->maxsize) &&
	    dmat->lowaddr >= paddr_max) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 * XXX Certain AGP hardware does.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	else if (vtophys(*vaddr) & (dmat->alignment - 1))
		printf("bus_dmamem_alloc failed to align memory properly.\n");
	return (0);
}

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if ((dmat->maxsize <= PAGE_SIZE) &&
	   (dmat->alignment < dmat->maxsize) &&
	    dmat->lowaddr >= paddr_max)
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
}
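
/*
 * Typical pairing, as an illustrative sketch only ("sc" and its fields are
 * a hypothetical driver softc; error handling omitted):
 *
 *	error = bus_dmamem_alloc(sc->sc_dmat, (void **)&sc->sc_ring,
 *	    BUS_DMA_ZERO, &sc->sc_ring_map);
 *	...
 *	bus_dmamem_free(sc->sc_dmat, sc->sc_ring, sc->sc_ring_map);
 */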

static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;

	if ((dmat->lowaddr < paddr_max || dmat->boundary > 0 ||
	    dmat->alignment > 1) && map != &nobounce_dmamap &&
	    map->pagesneeded == 0) {
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		curaddr = buf;
		while (buflen != 0) {
			sgsize = MIN(buflen, dmat->maxsegsz);
			if (run_filter(dmat, curaddr, 0) != 0) {
				sgsize = MIN(sgsize, PAGE_SIZE);
				map->pagesneeded++;
			}
			curaddr += sgsize;
			buflen -= sgsize;
		}
	}
}

static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if ((dmat->lowaddr < paddr_max || dmat->boundary > 0 ||
	    dmat->alignment > 1) && map != &nobounce_dmamap &&
	    map->pagesneeded == 0) {
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			if (pmap == kernel_pmap)
				paddr = pmap_kextract(vaddr);
			else
				paddr = pmap_extract(pmap, vaddr);
			if (run_filter(dmat, paddr, 0) != 0)
				map->pagesneeded++;
			vaddr += PAGE_SIZE;
		}
	}
}

static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{

	/* Reserve Necessary Bounce Pages */
	mtx_lock(&bounce_lock);
	if (flags & BUS_DMA_NOWAIT) {
		if (reserve_bounce_pages(dmat, map, 0) != 0) {
			mtx_unlock(&bounce_lock);
			return (ENOMEM);
		}
	} else {
		if (reserve_bounce_pages(dmat, map, 1) != 0) {
			/* Queue us for resources */
			STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
			    map, links);
			mtx_unlock(&bounce_lock);
			return (EINPROGRESS);
		}
	}
	mtx_unlock(&bounce_lock);

	return (0);
}

/*
 * Add a single contiguous physical range to the segment list.
 */
static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
	bus_addr_t baddr, bmask;
	int seg;

	/*
	 * Make sure we don't cross any boundaries.
	 */
	bmask = ~(dmat->boundary - 1);
	if (dmat->boundary > 0) {
		baddr = (curaddr + dmat->boundary) & bmask;
		if (sgsize > (baddr - curaddr))
			sgsize = (baddr - curaddr);
	}
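
	/*
	 * For example (illustrative values): with boundary = 0x1000,
	 * curaddr = 0xfff0, and sgsize = 0x100, baddr computes to 0x10000,
	 * so sgsize is clipped to 0x10; the remainder of the range will
	 * start a new segment at the boundary.
	 */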

	/*
	 * Insert chunk into a segment, coalescing with
	 * previous segment if possible.
	 */
	seg = *segp;
	if (seg == -1) {
		seg = 0;
		segs[seg].ds_addr = curaddr;
		segs[seg].ds_len = sgsize;
	} else {
		if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
		    (dmat->boundary == 0 ||
		    (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
			segs[seg].ds_len += sgsize;
		else {
			if (++seg >= dmat->nsegments)
				return (0);
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
	}
	*segp = seg;
	return (sgsize);
}

/*
 * Utility function to load a physical buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
    int *segp)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;
	int error;

	if (map == NULL)
		map = &nobounce_dmamap;

	if (segs == NULL)
		segs = dmat->segments;

	if (map != &nobounce_dmamap) {
		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	while (buflen > 0) {
		curaddr = buf;
		sgsize = MIN(buflen, dmat->maxsegsz);
		if (map->pagesneeded != 0 &&
		    run_filter(dmat, curaddr, sgsize)) {
			sgsize = MIN(sgsize, PAGE_SIZE);
			curaddr = add_bounce_page(dmat, map, 0, curaddr,
			    sgsize);
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		buf += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

int
_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{

	return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
	    segs, segp));
}

/*
 * Utility function to load a linear buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, pmap_t pmap, int flags,
    bus_dma_segment_t *segs, int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	vm_offset_t vaddr;
	int error;

	if (map == NULL)
		map = &nobounce_dmamap;

	if (segs == NULL)
		segs = dmat->segments;

	if (map != &nobounce_dmamap) {
		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	vaddr = (vm_offset_t)buf;

	while (buflen > 0) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap == kernel_pmap)
			curaddr = pmap_kextract(vaddr);
		else
			curaddr = pmap_extract(pmap, vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (sgsize > dmat->maxsegsz)
			sgsize = dmat->maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;

		if (map->pagesneeded != 0 && run_filter(dmat, curaddr, sgsize))
			curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
			    sgsize);

		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;

		vaddr += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

void
__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
{
	if (map != NULL) {
		map->dmat = dmat;
		map->mem = *mem;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}
}

bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, int error)
{

	if (segs == NULL)
		segs = dmat->segments;
	return (segs);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */

		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				if (bpage->datavaddr != 0)
					bcopy((void *)bpage->datavaddr,
					    (void *)bpage->vaddr,
					    bpage->datacount);
				else
					physcopyout(bpage->dataaddr,
					    (void *)bpage->vaddr,
					    bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			total_bounced++;
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				if (bpage->datavaddr != 0)
					bcopy((void *)bpage->vaddr,
					    (void *)bpage->datavaddr,
					    bpage->datacount);
				else
					physcopyin((void *)bpage->vaddr,
					    bpage->dataaddr,
					    bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			total_bounced++;
		}
	}
}
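
/*
 * Illustrative driver-side usage, as a sketch only ("sc" is a hypothetical
 * softc): for a memory-to-device transfer, call
 * bus_dmamap_sync(sc->sc_dmat, sc->sc_map, BUS_DMASYNC_PREWRITE) before
 * starting the DMA, so that bounced data is copied into the bounce pages;
 * for a device-to-memory transfer, call
 * bus_dmamap_sync(sc->sc_dmat, sc->sc_map, BUS_DMASYNC_POSTREAD) after it
 * completes, so that bounced data is copied back to the client buffer.
 */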

static void
init_bounce_pages(void *dummy __unused)
{

	free_bpages = 0;
	reserved_bpages = 0;
	active_bpages = 0;
	total_bpages = 0;
	STAILQ_INIT(&bounce_page_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	int count;

	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
		    M_NOWAIT, 0ul, dmat->lowaddr, PAGE_SIZE, dmat->boundary);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
		total_bpages++;
		free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	free_bpages -= pages;
	reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    bus_addr_t addr, bus_size_t size)
{
	struct bounce_page *bpage;

	KASSERT(map != NULL && map != &nobounce_dmamap,
	    ("add_bounce_page: bad map %p", map));

	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have any pages reserved");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
	reserved_bpages--;
	active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
		bpage->vaddr |= vaddr & PAGE_MASK;
		bpage->busaddr |= vaddr & PAGE_MASK;
	}
	bpage->datavaddr = vaddr;
	bpage->dataaddr = addr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;
	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * Reset the bounce page to start at offset 0.  Other uses
		 * of this bounce page may need to store a full page of
		 * data and/or assume it starts on a page boundary.
		 */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
	}

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist, map,
			    links);
			busdma_swi_pending = 1;
			total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
		    map->callback_arg, BUS_DMA_WAITOK);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}