/*-
 * Copyright (c) 1997 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/memdesc.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>

#define	MAX_BPAGES	1024

struct bus_dma_tag {
	bus_dma_tag_t	parent;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	bus_addr_t	highaddr;
	bus_dma_filter_t *filter;
	void		*filterarg;
	bus_size_t	maxsize;
	u_int		nsegments;
	bus_size_t	maxsegsz;
	int		flags;
	int		ref_count;
	int		map_count;
	bus_dma_lock_t	*lockfunc;
	void		*lockfuncarg;
	bus_dma_segment_t *segments;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_addr_t	dataaddr;	/* client physical address */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

u_int busdma_swi_pending;

static struct mtx bounce_lock;
static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static int total_bounced;
static int total_deferred;

static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, free_bpages, CTLFLAG_RD, &free_bpages, 0,
    "Free bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, reserved_bpages, CTLFLAG_RD, &reserved_bpages,
    0, "Reserved bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, active_bpages, CTLFLAG_RD, &active_bpages, 0,
    "Active bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
    "Total bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bounced, CTLFLAG_RD, &total_bounced, 0,
    "Total bounce requests");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_deferred, CTLFLAG_RD, &total_deferred,
    0, "Total bounce requests that were deferred");

struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	struct memdesc	mem;
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_offset_t vaddr, bus_addr_t addr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr,
    bus_size_t len);

/*
 * Return true if a match is made.
 *
 * To find a match, walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag, call the filter callback
 * to check for a match; if there is no filter callback, assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr, bus_size_t len)
{
	bus_size_t bndy;
	int retval;

	retval = 0;
	bndy = dmat->boundary;
	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) ||
		    (paddr & (dmat->alignment - 1)) != 0 ||
		    (paddr & bndy) != ((paddr + len) & bndy)) &&
		    (dmat->filter == NULL ||
		    (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;
		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}
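
/*
 * Example (illustrative only): for a tag created with
 * lowaddr == BUS_SPACE_MAXADDR_24BIT (an ISA-style 16MB limit),
 * highaddr == BUS_SPACE_MAXADDR, alignment 1, boundary 0, and no filter
 * callback, run_filter() reports a match for any address above 16MB:
 *
 *	run_filter(dmat, 0x00fff000, PAGE_SIZE) == 0	(reachable)
 *	run_filter(dmat, 0x01000000, PAGE_SIZE) == 1	(must bounce)
 *
 * A non-trivial alignment or boundary in the tag can force a bounce
 * even for addresses below lowaddr.
 */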

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant as dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
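
/*
 * Usage sketch (hypothetical driver): pass busdma_lock_mutex together
 * with the driver mutex as the lockfunc/lockfuncarg pair of
 * bus_dma_tag_create(); busdma_swi() then acquires that mutex around any
 * deferred load callback, e.g.:
 *
 *	bus_dma_tag_create(..., busdma_lock_mutex, &sc->sc_mtx, &dmat);
 */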

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	panic("driver error: busdma dflt_lock called");
}
#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4

/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_size_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Basic sanity checking */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page(lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page(highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	newtag->segments = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
			    newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (newtag->lowaddr < paddr_max && (flags & BUS_DMA_ALLOCNOW) != 0) {
		/* Must bounce */

		if (ptoa(total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}
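
/*
 * Example (a sketch; the device and softc names are hypothetical): a
 * driver limited to 32-bit DMA addresses on a machine with memory above
 * 4GB would create a tag like the following, causing maps created from
 * it to bounce any pages that land above lowaddr:
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev),
 *	    PAGE_SIZE, 0,			(alignment, boundary)
 *	    BUS_SPACE_MAXADDR_32BIT,		(lowaddr)
 *	    BUS_SPACE_MAXADDR,			(highaddr)
 *	    NULL, NULL,				(filter, filterarg)
 *	    DFLTPHYS, 1, DFLTPHYS,		(maxsize, nsegments, maxsegsz)
 *	    BUS_DMA_ALLOCNOW,			(flags)
 *	    busdma_lock_mutex, &Giant,		(lockfunc, lockfuncarg)
 *	    &sc->dmat);
 *
 * BUS_DMA_ALLOCNOW pre-allocates bounce pages at tag creation so that
 * later loads are less likely to fail for want of bounce memory.
 */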

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL)
					free(dmat->segments, M_DEVBUF);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL)
			return (ENOMEM);
	}

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->lowaddr < paddr_max) {
		/* Must bounce */
		int maxpages;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (*mapp == NULL)
			return (ENOMEM);

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, atop(paddr_max - dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0 && total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - total_bpages, pages);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	if (map != NULL && map != &nobounce_dmamap) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    mflags);
		if (dmat->segments == NULL)
			return (ENOMEM);
	}
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	/*
	 * XXX:
	 * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact
	 * alignment guarantees of malloc need to be nailed down, and the
	 * code below should be rewritten to take that into account.
	 *
	 * In the meantime, we'll warn the user if malloc gets it wrong.
	 */
	if ((dmat->maxsize <= PAGE_SIZE) &&
	   (dmat->alignment < dmat->maxsize) &&
	    dmat->lowaddr >= paddr_max) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 * XXX Certain AGP hardware does.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	else if (vtophys(*vaddr) & (dmat->alignment - 1))
		printf("bus_dmamem_alloc failed to align memory properly.\n");
	return (0);
}
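
/*
 * Usage sketch (hypothetical caller; error handling abbreviated):
 * allocate a zeroed buffer that satisfies the tag's constraints and
 * later release it with bus_dmamem_free():
 *
 *	void *ring;
 *	bus_dmamap_t map;
 *
 *	if (bus_dmamem_alloc(dmat, &ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
 *	    &map) != 0)
 *		return (ENOMEM);
 *	...
 *	bus_dmamem_free(dmat, ring, map);
 */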

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if ((dmat->maxsize <= PAGE_SIZE) &&
	   (dmat->alignment < dmat->maxsize) &&
	    dmat->lowaddr >= paddr_max)
		free(vaddr, M_DEVBUF);
	else
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
}

static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;

	if ((dmat->lowaddr < paddr_max || dmat->boundary > 0 ||
	    dmat->alignment > 1) && map != &nobounce_dmamap &&
	    map->pagesneeded == 0) {
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		curaddr = buf;
		while (buflen != 0) {
			sgsize = MIN(buflen, dmat->maxsegsz);
			if (run_filter(dmat, curaddr, 0) != 0) {
				sgsize = MIN(sgsize, PAGE_SIZE);
				map->pagesneeded++;
			}
			curaddr += sgsize;
			buflen -= sgsize;
		}
	}
}

static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if ((dmat->lowaddr < paddr_max || dmat->boundary > 0 ||
	    dmat->alignment > 1) && map != &nobounce_dmamap &&
	    map->pagesneeded == 0) {
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			if (pmap == kernel_pmap)
				paddr = pmap_kextract(vaddr);
			else
				paddr = pmap_extract(pmap, vaddr);
			if (run_filter(dmat, paddr, 0) != 0)
				map->pagesneeded++;
			vaddr += PAGE_SIZE;
		}
	}
}

static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{

	/* Reserve Necessary Bounce Pages */
	mtx_lock(&bounce_lock);
	if (flags & BUS_DMA_NOWAIT) {
		if (reserve_bounce_pages(dmat, map, 0) != 0) {
			mtx_unlock(&bounce_lock);
			return (ENOMEM);
		}
	} else {
		if (reserve_bounce_pages(dmat, map, 1) != 0) {
			/* Queue us for resources */
			STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
			    map, links);
			mtx_unlock(&bounce_lock);
			return (EINPROGRESS);
		}
	}
	mtx_unlock(&bounce_lock);

	return (0);
}

/*
 * Add a single contiguous physical range to the segment list.
 */
static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
	bus_addr_t baddr, bmask;
	int seg;

	/*
	 * Make sure we don't cross any boundaries.
	 */
	bmask = ~(dmat->boundary - 1);
	if (dmat->boundary > 0) {
		baddr = (curaddr + dmat->boundary) & bmask;
		if (sgsize > (baddr - curaddr))
			sgsize = (baddr - curaddr);
	}

	/*
	 * Insert chunk into a segment, coalescing with
	 * previous segment if possible.
	 */
	seg = *segp;
	if (seg == -1) {
		seg = 0;
		segs[seg].ds_addr = curaddr;
		segs[seg].ds_len = sgsize;
	} else {
		if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
		    (dmat->boundary == 0 ||
		    (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
			segs[seg].ds_len += sgsize;
		else {
			if (++seg >= dmat->nsegments)
				return (0);
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
	}
	*segp = seg;
	return (sgsize);
}
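
/*
 * Worked example (illustrative numbers): with dmat->boundary == 0x10000
 * (64KB), curaddr == 0xfff0 and sgsize == 0x100, the code above computes
 * baddr = (0xfff0 + 0x10000) & ~0xffff = 0x10000 and clips sgsize to
 * baddr - curaddr = 0x10, so the segment stops at the 64KB line; the
 * remaining 0xf0 bytes start a new segment on the next call.
 */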

/*
 * Utility function to load a physical buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
    int *segp)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;
	int error;

	if (map == NULL)
		map = &nobounce_dmamap;

	if (segs == NULL)
		segs = dmat->segments;

	if (map != &nobounce_dmamap) {
		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	while (buflen > 0) {
		curaddr = buf;
		sgsize = MIN(buflen, dmat->maxsegsz);
		if (map->pagesneeded != 0 &&
		    run_filter(dmat, curaddr, sgsize)) {
			sgsize = MIN(sgsize, PAGE_SIZE);
			curaddr = add_bounce_page(dmat, map, 0, curaddr,
			    sgsize);
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		buf += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Utility function to load a linear buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, pmap_t pmap, int flags,
    bus_dma_segment_t *segs, int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	vm_offset_t vaddr;
	int error;

	if (map == NULL)
		map = &nobounce_dmamap;

	if (segs == NULL)
		segs = dmat->segments;

	if (map != &nobounce_dmamap) {
		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	vaddr = (vm_offset_t)buf;

	while (buflen > 0) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap == kernel_pmap)
			curaddr = pmap_kextract(vaddr);
		else
			curaddr = pmap_extract(pmap, vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (sgsize > dmat->maxsegsz)
			sgsize = dmat->maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;

		if (map->pagesneeded != 0 && run_filter(dmat, curaddr, sgsize))
			curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
			    sgsize);

		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;

		vaddr += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

void
__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
{
	if (map != NULL) {
		map->dmat = dmat;
		map->mem = *mem;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}
}

bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, int error)
{

	if (segs == NULL)
		segs = dmat->segments;
	return (segs);
}
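
/*
 * Usage sketch (hypothetical driver code): these helpers sit underneath
 * the MI bus_dmamap_load*() entry points, which a driver calls with a
 * callback that receives the final segment list:
 *
 *	static void
 *	example_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		if (error == 0)
 *			((struct example_softc *)arg)->paddr = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_load(dmat, map, buf, buflen, example_cb, sc,
 *	    BUS_DMA_NOWAIT);
 *
 * A return of EINPROGRESS means the load was deferred for bounce pages
 * and the callback will run later from busdma_swi().
 */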

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */

		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				if (bpage->datavaddr != 0)
					bcopy((void *)bpage->datavaddr,
					    (void *)bpage->vaddr,
					    bpage->datacount);
				else
					physcopyout(bpage->dataaddr,
					    (void *)bpage->vaddr,
					    bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			total_bounced++;
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				if (bpage->datavaddr != 0)
					bcopy((void *)bpage->vaddr,
					    (void *)bpage->datavaddr,
					    bpage->datacount);
				else
					physcopyin((void *)bpage->vaddr,
					    bpage->dataaddr,
					    bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			total_bounced++;
		}
	}
}
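
/*
 * Usage sketch (hypothetical driver): bounce copying only happens at
 * sync time, so drivers must bracket DMA with the matching sync ops:
 *
 *	bus_dmamap_sync(dmat, map, BUS_DMASYNC_PREWRITE);
 *	... start device read of the buffer (DMA to device) ...
 *
 *	... device write (DMA from device) completes ...
 *	bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD);
 *
 * Skipping PREWRITE would hand the device stale bounce-page contents;
 * skipping POSTREAD would leave received data stranded in the bounce
 * pages.
 */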

static void
init_bounce_pages(void *dummy __unused)
{

	free_bpages = 0;
	reserved_bpages = 0;
	active_bpages = 0;
	total_bpages = 0;
	STAILQ_INIT(&bounce_page_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	int count;

	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
		    M_NOWAIT, 0ul, dmat->lowaddr, PAGE_SIZE, dmat->boundary);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
		total_bpages++;
		free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	free_bpages -= pages;
	reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
    bus_addr_t addr, bus_size_t size)
{
	struct bounce_page *bpage;

	KASSERT(map != NULL && map != &nobounce_dmamap,
	    ("add_bounce_page: bad map %p", map));

	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have any pages reserved");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
	reserved_bpages--;
	active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
		bpage->vaddr |= vaddr & PAGE_MASK;
		bpage->busaddr |= vaddr & PAGE_MASK;
	}
	bpage->datavaddr = vaddr;
	bpage->dataaddr = addr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;
	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * Reset the bounce page to start at offset 0.  Other uses
		 * of this bounce page may need to store a full page of
		 * data and/or assume it starts on a page boundary.
		 */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
	}

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist, map,
			    links);
			busdma_swi_pending = 1;
			total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
		    map->callback_arg, BUS_DMA_WAITOK);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}