busdma_machdep.c revision 191011
/*-
 * Copyright (c) 1997 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/ia64/ia64/busdma_machdep.c 191011 2009-04-13 19:20:32Z kib $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX_BPAGES 256

struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  alignment;
	bus_size_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	u_int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;
	int		  map_count;
	bus_dma_lock_t	 *lockfunc;
	void		 *lockfuncarg;
	bus_dma_segment_t *segments;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

static struct mtx bounce_lock;
static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static int total_bounced;
static int total_deferred;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, free_bpages, CTLFLAG_RD, &free_bpages, 0,
	   "Free bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, reserved_bpages, CTLFLAG_RD, &reserved_bpages,
	   0, "Reserved bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, active_bpages, CTLFLAG_RD, &active_bpages, 0,
	   "Active bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
	   "Total bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bounced, CTLFLAG_RD, &total_bounced, 0,
	   "Total bounce requests");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_deferred, CTLFLAG_RD, &total_deferred, 0,
	   "Total bounce requests that were deferred");

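/*
 * The counters above are exported under the hw.busdma sysctl tree; for
 * example, "sysctl hw.busdma.total_bpages" with the stock sysctl(8)
 * utility reports the current size of the bounce pool at runtime.
 */
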
struct bus_dmamap {
	struct bp_list	       bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	void		      *buf;		/* unmapped buffer pointer */
	bus_size_t	       buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr,
			       bus_size_t len);

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag, then call the filter
 * callback to check for a match; if there is no filter callback, assume
 * a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr, bus_size_t len)
{
	bus_size_t bndy;
	int retval;

	retval = 0;
	bndy = dmat->boundary;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		 || ((paddr & (dmat->alignment - 1)) != 0)
		 || ((paddr & bndy) != ((paddr + len) & bndy)))
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant as dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

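/*
 * Illustrative sketch (not part of this file): a driver that wants its
 * deferred load callbacks serialized by its own mutex would pass
 * busdma_lock_mutex and that mutex when creating its tag.  The softc
 * fields (sc->sc_mtx, sc->sc_dmat) are hypothetical names:
 *
 *	error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
 *	    BUS_SPACE_MAXADDR, NULL, NULL, DFLTPHYS, 1, DFLTPHYS, 0,
 *	    busdma_lock_mutex, &sc->sc_mtx, &sc->sc_dmat);
 */
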
/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	panic("driver error: busdma dflt_lock called");
}

#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Basic sanity checking */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page(lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page(highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	newtag->segments = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (newtag->lowaddr < ptoa(Maxmem) && (flags & BUS_DMA_ALLOCNOW) != 0) {
		/* Must bounce */

		if (ptoa(total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}

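/*
 * Note on the parent adjustments above (illustrative): the MIN/MAX logic
 * means a child tag can only be as permissive as its parent.  If the
 * parent was created with a 4GB lowaddr, a child asking for
 * BUS_SPACE_MAXADDR still inherits the 4GB limit, so its maps bounce
 * exactly where the parent's would.
 */
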
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL)
					free(dmat->segments, M_DEVBUF);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL)
			return (ENOMEM);
	}

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->lowaddr < ptoa(Maxmem)) {
		/* Must bounce */
		int maxpages;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT | M_ZERO);
		if (*mapp == NULL)
			return (ENOMEM);

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0 && total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - total_bpages, pages);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	if (map != NULL && map != &nobounce_dmamap) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}


/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    mflags);
		if (dmat->segments == NULL)
			return (ENOMEM);
	}
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	/*
	 * XXX:
	 * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact
	 * alignment guarantees of malloc need to be nailed down, and the
	 * code below should be rewritten to take that into account.
	 *
	 * In the meantime, we'll warn the user if malloc gets it wrong.
	 */
	if ((dmat->maxsize <= PAGE_SIZE) &&
	   (dmat->alignment < dmat->maxsize) &&
	    dmat->lowaddr >= ptoa(Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 * XXX Certain AGP hardware does.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	else if ((uintptr_t)*vaddr & (dmat->alignment - 1))
		printf("bus_dmamem_alloc failed to align memory properly.\n");
	return (0);
}

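/*
 * Illustrative alloc/free pairing (hypothetical driver code; sc and its
 * fields are made-up names).  On this platform bus_dmamem_alloc() always
 * returns a NULL map, which is exactly what bus_dmamem_free() expects:
 *
 *	if (bus_dmamem_alloc(sc->sc_dmat, &sc->sc_ring,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->sc_ring_map) != 0)
 *		return (ENOMEM);
 *	...
 *	bus_dmamem_free(sc->sc_dmat, sc->sc_ring, sc->sc_ring_map);
 */
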
/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if ((dmat->maxsize <= PAGE_SIZE) &&
	   (dmat->alignment < dmat->maxsize) &&
	    dmat->lowaddr >= ptoa(Maxmem))
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			struct thread *td,
			int flags,
			bus_addr_t *lastaddrp,
			bus_dma_segment_t *segs,
			int *segp,
			int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr;
	bus_addr_t paddr;
	int seg;
	pmap_t pmap;

	if (map == NULL)
		map = &nobounce_dmamap;

	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = NULL;

	if ((dmat->lowaddr < ptoa(Maxmem) || dmat->boundary > 0 ||
	    dmat->alignment > 1) && map != &nobounce_dmamap &&
	    map->pagesneeded == 0) {
		vm_offset_t vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			if (pmap != NULL)
				paddr = pmap_extract(pmap, vaddr);
			else
				paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr, 0) != 0)
				map->pagesneeded++;
			vaddr += PAGE_SIZE;
		}
	}

	vaddr = (vm_offset_t)buf;

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (sgsize > dmat->maxsegsz)
			sgsize = dmat->maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}
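		/*
		 * Worked example of the clipping above (illustrative
		 * numbers): with boundary = 0x10000, bmask is ~0xffff.
		 * For curaddr = 0x2fff0, baddr = (0x2fff0 + 0x10000) &
		 * ~0xffff = 0x30000, so sgsize is clipped to 0x10 bytes
		 * and the segment ends exactly on the 64K line.
		 */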
594104486Ssam
595134928Smarcel		if (map->pagesneeded != 0 && run_filter(dmat, curaddr, sgsize))
596134928Smarcel			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);
597134928Smarcel
598104486Ssam		/*
599104486Ssam		 * Insert chunk into a segment, coalescing with
600104486Ssam		 * previous segment if possible.
601104486Ssam		 */
602104486Ssam		if (first) {
603104486Ssam			segs[seg].ds_addr = curaddr;
604104486Ssam			segs[seg].ds_len = sgsize;
605104486Ssam			first = 0;
606104486Ssam		} else {
607173988Sjhb			if (curaddr == lastaddr &&
608104486Ssam			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
609104486Ssam			    (dmat->boundary == 0 ||
610104486Ssam			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
611104486Ssam				segs[seg].ds_len += sgsize;
612104486Ssam			else {
613104486Ssam				if (++seg >= dmat->nsegments)
614104486Ssam					break;
615104486Ssam				segs[seg].ds_addr = curaddr;
616104486Ssam				segs[seg].ds_len = sgsize;
617104486Ssam			}
618104486Ssam		}
619104486Ssam
620104486Ssam		lastaddr = curaddr + sgsize;
621104486Ssam		vaddr += sgsize;
622104486Ssam		buflen -= sgsize;
623104486Ssam	}
624104486Ssam
625104486Ssam	*segp = seg;
626104486Ssam	*lastaddrp = lastaddr;
627104486Ssam
628104486Ssam	/*
629104486Ssam	 * Did we fit?
630104486Ssam	 */
631104486Ssam	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
632104486Ssam}
633104486Ssam
634104486Ssam/*
635134928Smarcel * Map the buffer buf into bus space using the dmamap map.
636134928Smarcel */
637134928Smarcelint
638134928Smarcelbus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
639134928Smarcel		bus_size_t buflen, bus_dmamap_callback_t *callback,
640134928Smarcel		void *callback_arg, int flags)
641134928Smarcel{
642134928Smarcel	bus_addr_t		lastaddr = 0;
643134928Smarcel	int			error, nsegs = 0;
644134928Smarcel
645134928Smarcel	if (map != NULL) {
646134928Smarcel		flags |= BUS_DMA_WAITOK;
647134928Smarcel		map->callback = callback;
648134928Smarcel		map->callback_arg = callback_arg;
649134928Smarcel	}
650134928Smarcel
651134928Smarcel	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
652140311Sscottl	    &lastaddr, dmat->segments, &nsegs, 1);
653134928Smarcel
654134928Smarcel	if (error == EINPROGRESS)
655134928Smarcel		return (error);
656134928Smarcel
657134928Smarcel	if (error)
658134928Smarcel		(*callback)(callback_arg, dmat->segments, 0, error);
659134928Smarcel	else
660134928Smarcel		(*callback)(callback_arg, dmat->segments, nsegs + 1, 0);
661134928Smarcel
662134928Smarcel	return (0);
663134928Smarcel}
664134928Smarcel
665134928Smarcel/*
666104486Ssam * Like _bus_dmamap_load(), but for mbufs.
667104486Ssam */
668104486Ssamint
669104486Ssambus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
670104486Ssam		     struct mbuf *m0,
671104486Ssam		     bus_dmamap_callback2_t *callback, void *callback_arg,
672104486Ssam		     int flags)
673104486Ssam{
674104486Ssam	int nsegs, error;
675104486Ssam
676113255Sdes	M_ASSERTPKTHDR(m0);
677104486Ssam
678134928Smarcel	flags |= BUS_DMA_NOWAIT;
679104486Ssam	nsegs = 0;
680104486Ssam	error = 0;
681104486Ssam	if (m0->m_pkthdr.len <= dmat->maxsize) {
682104486Ssam		int first = 1;
683117139Smux		bus_addr_t lastaddr = 0;
684104486Ssam		struct mbuf *m;
685104486Ssam
686104486Ssam		for (m = m0; m != NULL && error == 0; m = m->m_next) {
687110335Sharti			if (m->m_len > 0) {
688134928Smarcel				error = _bus_dmamap_load_buffer(dmat, map,
689110335Sharti						m->m_data, m->m_len,
690110335Sharti						NULL, flags, &lastaddr,
691140311Sscottl						dmat->segments, &nsegs, first);
692110335Sharti				first = 0;
693110335Sharti			}
694104486Ssam		}
695104486Ssam	} else {
696104486Ssam		error = EINVAL;
697104486Ssam	}
698104486Ssam
699104486Ssam	if (error) {
700104486Ssam		/* force "no valid mappings" in callback */
701134928Smarcel		(*callback)(callback_arg, dmat->segments, 0, 0, error);
702104486Ssam	} else {
703134928Smarcel		(*callback)(callback_arg, dmat->segments, nsegs + 1,
704134928Smarcel		    m0->m_pkthdr.len, error);
705104486Ssam	}
706104486Ssam	return (error);
707104486Ssam}
708104486Ssam
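/*
 * Note on the "nsegs + 1" above: _bus_dmamap_load_buffer() leaves nsegs
 * at the index of the last segment it filled in, so the callbacks are
 * handed a segment count rather than a last-segment index.
 */
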
int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct mbuf *m0, bus_dma_segment_t *segs,
			int *nsegs, int flags)
{
	int error;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	*nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
						m->m_data, m->m_len,
						NULL, flags, &lastaddr,
						segs, nsegs, first);
				first = 0;
			}
		}
		++*nsegs;
	} else {
		error = EINVAL;
	}

	return (error);
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	bus_addr_t lastaddr;
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	struct thread *td = NULL;

	flags |= BUS_DMA_NOWAIT;
	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		td = uio->uio_td;
		KASSERT(td != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
	}

	nsegs = 0;
	error = 0;
	first = 1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map, addr,
			    minlen, td, flags, &lastaddr, dmat->segments,
			    &nsegs, first);
			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments, nsegs + 1,
		    uio->uio_resid, error);
	}
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */

		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			total_bounced++;
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			total_bounced++;
		}
	}
}

static void
init_bounce_pages(void *dummy __unused)
{

	free_bpages = 0;
	reserved_bpages = 0;
	active_bpages = 0;
	total_bpages = 0;
	STAILQ_INIT(&bounce_page_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

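/*
 * Grow the bounce pool by up to 'numpages' pages that honor the tag's
 * lowaddr and boundary restrictions; returns how many pages were
 * actually added.
 */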
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	int count;

	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 dmat->lowaddr,
							 PAGE_SIZE,
							 dmat->boundary);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
		total_bpages++;
		free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

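/*
 * Reserve free bounce pages against what this map still needs and return
 * the shortfall.  With commit == 0 the reservation is all-or-nothing: if
 * the full request cannot be met, nothing is taken.
 */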
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	free_bpages -= pages;
	reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

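/*
 * Consume one reserved bounce page for the client data at 'vaddr' and
 * queue it on the map.  The returned bus address is what the caller puts
 * in the segment list in place of the original physical address.
 */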
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_page *bpage;

	KASSERT(map != NULL && map != &nobounce_dmamap,
	    ("add_bounce_page: bad map %p", map));

	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
	reserved_bpages--;
	active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/* page offset needs to be preserved */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
		bpage->vaddr |= vaddr & PAGE_MASK;
		bpage->busaddr |= vaddr & PAGE_MASK;
	}
	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

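/*
 * Return a bounce page to the free list and, if that satisfies the
 * reservation of the first waiting map, move that map to the callback
 * list and schedule the busdma software interrupt.
 */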
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

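/*
 * Software interrupt handler for deferred bus_dmamap_load() requests.
 * Re-run each queued load now that its bounce pages are reserved,
 * bracketing the callback with the tag's driver-supplied lock.
 */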
void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}
999