busdma_machdep.c revision 39243
162590Sitojun/*
2122615Sume * Copyright (c) 1997 Justin T. Gibbs.
362590Sitojun * All rights reserved.
455505Sshin *
555505Sshin * Redistribution and use in source and binary forms, with or without
655505Sshin * modification, are permitted provided that the following conditions
755505Sshin * are met:
855505Sshin * 1. Redistributions of source code must retain the above copyright
955505Sshin *    notice, this list of conditions, and the following disclaimer,
1055505Sshin *    without modification, immediately at the beginning of the file.
1155505Sshin * 2. The name of the author may not be used to endorse or promote products
1255505Sshin *    derived from this software without specific prior written permission.
1355505Sshin *
1455505Sshin * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
1555505Sshin * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1655505Sshin * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1755505Sshin * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
1855505Sshin * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
1955505Sshin * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2055505Sshin * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2155505Sshin * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2255505Sshin * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2355505Sshin * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2455505Sshin * SUCH DAMAGE.
2555505Sshin *
2655505Sshin *      $Id: busdma_machdep.c,v 1.7 1998/07/11 07:45:28 bde Exp $
2755505Sshin */
2855505Sshin
2955505Sshin#include <sys/param.h>
3055505Sshin#include <sys/systm.h>
3155505Sshin#include <sys/malloc.h>
3255505Sshin
3355505Sshin#include <vm/vm.h>
3455505Sshin#include <vm/vm_prot.h>
3555505Sshin#include <vm/vm_page.h>
3655505Sshin
3755505Sshin#include <machine/bus.h>
3855505Sshin#include <machine/md_var.h>
3955505Sshin
/*
 * XXX These macros evaluate their arguments more than once; do not pass
 *     expressions with side effects.
 */
#define MAX(a,b) (((a) > (b)) ? (a) : (b))
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
/* Upper bound on per-map growth of the bounce page pool. */
#define MAX_BPAGES 128
4355505Sshin
/*
 * A DMA tag describes the constraints a device places on the memory it
 * can access.  Tags chain through 'parent' so a child inherits (and may
 * tighten) its ancestors' restrictions.
 */
struct bus_dma_tag {
	bus_dma_tag_t	  parent;	/* tag we inherit restrictions from */
	bus_size_t	  alignment;	/* required allocation alignment */
	bus_size_t	  boundary;	/* boundary a segment may not cross */
	bus_addr_t	  lowaddr;	/* exclusion window start (exclusive) */
	bus_addr_t	  highaddr;	/* exclusion window end (inclusive) */
	bus_dma_filter_t *filter;	/* optional per-address override test */
	void		 *filterarg;	/* opaque argument passed to filter */
	bus_size_t	  maxsize;	/* largest mapping this tag supports */
	u_int		  nsegments;	/* max scatter/gather segments */
	bus_size_t	  maxsegsz;	/* max size of a single segment */
	int		  flags;	/* BUS_DMA_* flags */
	int		  ref_count;	/* self plus referencing child tags */
	int		  map_count;	/* dmamaps created from this tag */
};
5955505Sshin
/*
 * One page of DMA-safe memory used to shadow ("bounce") client data that
 * lies outside a tag's addressable range.
 */
struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};
6755505Sshin
/*
 * Set when deferred maps are queued for busdma_swi(); nothing in this
 * file clears it — presumably the software-interrupt dispatcher does
 * before invoking busdma_swi() (TODO confirm against the swi code).
 */
int busdma_swi_pending;

/* Global bounce page pool; the counters are guarded by splhigh sections. */
static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;		/* pages on the free list, unreserved */
static int reserved_bpages;	/* pages promised to maps, not yet in use */
static int active_bpages;	/* pages currently shadowing client data */
static int total_bpages;	/* every page ever added to the pool */
/* Lowest 'lowaddr' constraint the current pool satisfies. */
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;
7655505Sshin
/*
 * Per-mapping state: the bounce pages attached to the map plus, for loads
 * deferred while waiting on bounce pages, the saved arguments needed to
 * retry the load from busdma_swi().
 */
struct bus_dmamap {
	struct bp_list	       bpages;		/* bounce pages owned by map */
	int		       pagesneeded;	/* bounce pages still required */
	int		       pagesreserved;	/* pages reserved from pool */
	bus_dma_tag_t	       dmat;		/* tag of a deferred load */
	void		      *buf;		/* unmapped buffer pointer */
	bus_size_t	       buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;	/* deferred load's callback */
	void		      *callback_arg;	/* deferred load's argument */
	STAILQ_ENTRY(bus_dmamap) links;
};
8855505Sshin
/* Maps blocked waiting for enough bounce pages to be reserved. */
static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
/* Fully-reserved maps awaiting their deferred load in busdma_swi(). */
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
/* Shared dummy map handed out for tags that never need bouncing. */
static struct bus_dmamap nobounce_dmamap;

static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map);
static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
9855505Sshinstatic __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
9955505Sshin
10055505Sshinstatic __inline int
10155505Sshinrun_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
10255505Sshin{
10355505Sshin	int retval;
10455505Sshin
10555505Sshin	retval = 0;
10655505Sshin	do {
10755505Sshin		if (paddr > dmat->lowaddr
10855505Sshin		 && paddr <= dmat->highaddr
10955505Sshin		 && (dmat->filter == NULL
11055505Sshin		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
11155505Sshin			retval = 1;
11255505Sshin
11362590Sitojun		dmat = dmat->parent;
11455505Sshin	} while (retval == 0 && dmat != NULL);
11562590Sitojun	return (retval);
11655505Sshin}
117186119Sqingli
#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 *
 * The new tag inherits (and merges with) any restrictions imposed by
 * 'parent'.  When BUS_DMA_ALLOCNOW is set and the tag can address less
 * than all of physical memory, bounce pages sufficient for 'maxsize' are
 * pre-allocated.  Returns 0 and stores the tag via '*dmat', or ENOMEM.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->boundary = boundary;
	/* Round each exclusion bound up to the last byte of its page. */
	newtag->lowaddr = trunc_page(lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page(highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		/*
		 * XXX Not really correct??? Probably need to honor boundary
		 *     all the way up the inheritance chain.
		 */
		newtag->boundary = MAX(parent->boundary, newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL) {
			parent->ref_count++;
		}
	}

	if (newtag->lowaddr < ptoa(Maxmem) && (flags & BUS_DMA_ALLOCNOW) != 0) {
		/* Must bounce */

		if (lowaddr > bounce_lowaddr) {
			/*
			 * Go through the pool and kill any pages
			 * that don't reside below lowaddr.
			 */
			panic("bus_dma_tag_create: page reallocation "
			      "not implemented");
		}
		if (ptoa(total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		/*
		 * NOTE(review): on this path the parent's ref_count may
		 * already have been incremented above and is not rolled
		 * back — looks like a leaked reference; verify intent.
		 */
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}
20655505Sshin
20755505Sshinint
20855505Sshinbus_dma_tag_destroy(bus_dma_tag_t dmat)
20955505Sshin{
21055505Sshin	if (dmat != NULL) {
21155505Sshin
212122615Sume		if (dmat->map_count != 0)
213122615Sume			return (EBUSY);
214122615Sume
215122615Sume		while (dmat != NULL) {
216122615Sume			bus_dma_tag_t parent;
21755505Sshin
218122615Sume			parent = dmat->parent;
21955505Sshin			dmat->ref_count--;
220122615Sume			if (dmat->ref_count == 0) {
221122615Sume				free(dmat, M_DEVBUF);
22255505Sshin			}
22355505Sshin			dmat = parent;
22455505Sshin		}
22555505Sshin	}
22655505Sshin	return (0);
22755505Sshin}
22855505Sshin
/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 *
 * Tags that can address all of physical memory share the static
 * nobounce_dmamap; otherwise a real map is allocated and the bounce
 * pool is grown toward a per-tag ceiling.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->lowaddr < ptoa(Maxmem)) {
		/* Must bounce */
		int maxpages;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT);
		if (*mapp == NULL) {
			return (ENOMEM);
		} else {
			/* Initialize the new map */
			bzero(*mapp, sizeof(**mapp));
			STAILQ_INIT(&((*mapp)->bpages));
		}
		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0
		  && total_bpages < maxpages)) {
			int pages;

			if (dmat->lowaddr > bounce_lowaddr) {
				/*
				 * Go through the pool and kill any pages
				 * that don't reside below lowaddr.
				 */
				panic("bus_dmamap_create: page reallocation "
				      "not implemented");
			}
			pages = atop(dmat->maxsize);
			pages = MIN(maxpages - total_bpages, pages);
			error = alloc_bounce_pages(dmat, pages);

			/*
			 * The first allocation must fully succeed; later,
			 * partial growth of an already-seeded pool is fine.
			 */
			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = &nobounce_dmamap;
	}
	if (error == 0)
		dmat->map_count++;
	return (error);
}
289122615Sume
29055505Sshin/*
291122615Sume * Destroy a handle for mapping from kva/uva/physical
292122615Sume * address space into bus device space.
293122615Sume */
294122615Sumeint
295122615Sumebus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
296122615Sume{
29755505Sshin	if (map != NULL) {
298122615Sume		if (STAILQ_FIRST(&map->bpages) != NULL)
299122615Sume			return (EBUSY);
300122615Sume		free(map, M_DEVBUF);
301122615Sume	}
302122615Sume	dmat->map_count--;
303122615Sume	return (0);
30455505Sshin}
305122615Sume
306122615Sume
/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 *
 * Small allocations that can never need bouncing come from malloc();
 * everything else is satisfied by contigmalloc() so the memory is
 * physically contiguous and below the tag's lowaddr.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	/* If we succeed, no mapping/bouncing will be required */
	*mapp = &nobounce_dmamap;

	if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF,
				(flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF,
				      (flags & BUS_DMA_NOWAIT)
				      ? M_NOWAIT : M_WAITOK,
				      0ul, dmat->lowaddr, 1ul, dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}
337167260Skevlo
/*
 * Free a piece of memory and its associated dmamap, that was allocated
 * via bus_dmamem_alloc.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != &nobounce_dmamap)
		panic("bus_dmamem_free: Invalid map freed\n");
	/*
	 * NOTE(review): bus_dmamem_alloc() may have obtained this memory
	 * from contigmalloc(), yet it is released here with free() — verify
	 * that contigmalloc memory can be returned to the malloc pool.
	 */
	free(vaddr, M_DEVBUF);
}
35355505Sshin
35455505Sshin#define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1)
35555505Sshin
35655505Sshin/*
35755505Sshin * Map the buffer buf into bus space using the dmamap map.
358121156Sume */
359121156Sumeint
36055505Sshinbus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
36155505Sshin		bus_size_t buflen, bus_dmamap_callback_t *callback,
36255505Sshin		void *callback_arg, int flags)
36355505Sshin{
36462590Sitojun	vm_offset_t		vaddr;
36555505Sshin	vm_offset_t		paddr;
36655505Sshin#ifdef __GNUC__
367253999Shrs	bus_dma_segment_t	dm_segments[dmat->nsegments];
368253999Shrs#else
36955505Sshin	bus_dma_segment_t	dm_segments[BUS_DMAMAP_NSEGS];
37055505Sshin#endif
37155505Sshin	bus_dma_segment_t      *sg;
37255505Sshin	int			seg;
37355505Sshin	int			error;
37455505Sshin
37555505Sshin	error = 0;
37655505Sshin	/*
37755505Sshin	 * If we are being called during a callback, pagesneeded will
378259169Sae	 * be non-zero, so we can avoid doing the work twice.
37955505Sshin	 */
38055505Sshin	if (dmat->lowaddr < ptoa(Maxmem) && map->pagesneeded == 0) {
38155505Sshin		vm_offset_t	vendaddr;
38255505Sshin
38355505Sshin		/*
38455505Sshin		 * Count the number of bounce pages
38555505Sshin		 * needed in order to complete this transfer
38655505Sshin		 */
38755505Sshin		vaddr = trunc_page(buf);
38855505Sshin		vendaddr = (vm_offset_t)buf + buflen;
38955505Sshin
39055505Sshin		while (vaddr < vendaddr) {
39155505Sshin			paddr = pmap_kextract(vaddr);
39255505Sshin			if (run_filter(dmat, paddr) != 0) {
39355505Sshin
39455505Sshin				map->pagesneeded++;
39555505Sshin			}
39655505Sshin			vaddr += PAGE_SIZE;
39755505Sshin		}
39855505Sshin	}
39955505Sshin
40055505Sshin	/* Reserve Necessary Bounce Pages */
40155505Sshin	if (map->pagesneeded != 0) {
40255505Sshin		int s;
403243903Shrs
404243903Shrs		s = splhigh();
40555505Sshin	 	if (reserve_bounce_pages(dmat, map) != 0) {
40655505Sshin
40755505Sshin			/* Queue us for resources */
40855505Sshin			map->dmat = dmat;
40955505Sshin			map->buf = buf;
41055505Sshin			map->buflen = buflen;
411253999Shrs			map->callback = callback;
412121156Sume			map->callback_arg = callback_arg;
413253999Shrs
414253970Shrs			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
41562590Sitojun			splx(s);
41662590Sitojun
41755505Sshin			return (EINPROGRESS);
41855505Sshin		}
41955505Sshin		splx(s);
420121156Sume	}
421121156Sume
42255505Sshin	vaddr = (vm_offset_t)buf;
42355505Sshin	sg = &dm_segments[0];
42455505Sshin	seg = 1;
42555505Sshin	sg->ds_len = 0;
42655505Sshin
427122615Sume	do {
428122615Sume		bus_size_t	size;
429122615Sume		vm_offset_t	nextpaddr;	/* GCC warning expected */
430122615Sume
431210936Sjhb		paddr = pmap_kextract(vaddr);
432122615Sume		size = PAGE_SIZE - (paddr & PAGE_MASK);
433122615Sume		if (size > buflen)
43455505Sshin			size = buflen;
43562590Sitojun
43662590Sitojun		if (map->pagesneeded != 0
43755505Sshin		 && run_filter(dmat, paddr)) {
43862590Sitojun			paddr = add_bounce_page(dmat, map, vaddr, size);
43955505Sshin		}
44055505Sshin
44155505Sshin		if (sg->ds_len == 0) {
44255505Sshin			sg->ds_addr = paddr;
44355505Sshin			sg->ds_len = size;
44455505Sshin		} else if (paddr == nextpaddr) {
44555505Sshin			sg->ds_len += size;
44655505Sshin		} else {
44755505Sshin			/* Go to the next segment */
44855505Sshin			sg++;
44955505Sshin			seg++;
45055505Sshin			if (seg > dmat->nsegments)
45155505Sshin				break;
45255505Sshin			sg->ds_addr = paddr;
453259169Sae			sg->ds_len = size;
45455505Sshin		}
45555505Sshin		vaddr += size;
45655505Sshin		nextpaddr = paddr + size;
45755505Sshin		buflen -= size;
45855505Sshin	} while (buflen > 0);
45955505Sshin
46055505Sshin	if (buflen != 0) {
46155505Sshin		printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
46255505Sshin		       buflen);
46355505Sshin		error = EFBIG;
46455505Sshin	}
465121156Sume
46655505Sshin	(*callback)(callback_arg, dm_segments, seg, error);
46755505Sshin
46855505Sshin	return (0);
469122615Sume}
47055505Sshin
47155505Sshin/*
472121156Sume * Release the mapping held by map.
473121156Sume */
47455505Sshinvoid
47555505Sshin_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
47655505Sshin{
47755505Sshin	struct bounce_page *bpage;
47855505Sshin
47955505Sshin	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
48055505Sshin		STAILQ_REMOVE_HEAD(&map->bpages, links);
48155505Sshin		free_bounce_page(dmat, bpage);
48255505Sshin	}
483259169Sae}
48455505Sshin
48555505Sshinvoid
48655505Sshin_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
487186119Sqingli{
48855505Sshin	struct bounce_page *bpage;
48955505Sshin
49055505Sshin	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
49155505Sshin
49255505Sshin		/*
49355505Sshin		 * Handle data bouncing.  We might also
49455505Sshin		 * want to add support for invalidating
49555505Sshin		 * the caches on broken hardware
49655505Sshin		 */
49755505Sshin		switch (op) {
49855505Sshin		case BUS_DMASYNC_PREWRITE:
49955505Sshin			while (bpage != NULL) {
500121156Sume				bcopy((void *)bpage->datavaddr,
50155505Sshin				      (void *)bpage->vaddr,
50255505Sshin				      bpage->datacount);
50355505Sshin				bpage = STAILQ_NEXT(bpage, links);
504243903Shrs			}
505243903Shrs			break;
50655505Sshin
507121156Sume		case BUS_DMASYNC_POSTREAD:
508121156Sume			while (bpage != NULL) {
50955505Sshin				bcopy((void *)bpage->vaddr,
51055505Sshin				      (void *)bpage->datavaddr,
51155505Sshin				      bpage->datacount);
51255505Sshin				bpage = STAILQ_NEXT(bpage, links);
51355505Sshin			}
51462590Sitojun			break;
51578064Sume		case BUS_DMASYNC_PREREAD:
51655505Sshin		case BUS_DMASYNC_POSTWRITE:
51762590Sitojun			/* No-ops */
51862590Sitojun			break;
51955505Sshin		}
52062590Sitojun	}
52155505Sshin}
52255505Sshin
52355505Sshinstatic int
52455505Sshinalloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
52555505Sshin{
526186119Sqingli	int count;
527186119Sqingli
528186119Sqingli	count = 0;
529186119Sqingli	if (total_bpages == 0) {
530186119Sqingli		STAILQ_INIT(&bounce_page_list);
531186500Sqingli		STAILQ_INIT(&bounce_map_waitinglist);
53255505Sshin		STAILQ_INIT(&bounce_map_callbacklist);
533243903Shrs	}
534243903Shrs
535121156Sume	while (numpages > 0) {
536121156Sume		struct bounce_page *bpage;
53755505Sshin		int s;
53855505Sshin
53955505Sshin		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
54055505Sshin						     M_NOWAIT);
54155505Sshin
54255505Sshin		if (bpage == NULL)
543122615Sume			break;
54478064Sume		bzero(bpage, sizeof(*bpage));
54578064Sume		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
54678064Sume							 M_NOWAIT, 0ul,
54755505Sshin							 dmat->lowaddr,
54855505Sshin							 PAGE_SIZE,
54955505Sshin							 0);
55055505Sshin		if (bpage->vaddr == NULL) {
551259169Sae			free(bpage, M_DEVBUF);
55255505Sshin			break;
55355505Sshin		}
55455505Sshin		bpage->busaddr = pmap_kextract(bpage->vaddr);
55562590Sitojun		s = splhigh();
55655505Sshin		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
55755505Sshin		total_bpages++;
55855505Sshin		free_bpages++;
55955505Sshin		splx(s);
56055505Sshin		count++;
561253999Shrs		numpages--;
56255505Sshin	}
56378064Sume	return (count);
56478064Sume}
56562590Sitojun
56678064Sumestatic int
56755505Sshinreserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
56855505Sshin{
56966865Ssumikawa	int pages;
570122615Sume
57178064Sume	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
572122615Sume	free_bpages -= pages;
57355505Sshin	reserved_bpages += pages;
57455505Sshin	map->pagesreserved += pages;
57555505Sshin	pages = map->pagesneeded - map->pagesreserved;
57655505Sshin
57755505Sshin	return (pages);
57855505Sshin}
57955505Sshin
580186119Sqinglistatic vm_offset_t
58155505Sshinadd_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
582186119Sqingli		bus_size_t size)
583186119Sqingli{
584186119Sqingli	int s;
58555505Sshin	struct bounce_page *bpage;
58655505Sshin
58755505Sshin	if (map->pagesneeded == 0)
58855505Sshin		panic("add_bounce_page: map doesn't need any pages");
589121156Sume	map->pagesneeded--;
59055505Sshin
59155505Sshin	if (map->pagesreserved == 0)
59255505Sshin		panic("add_bounce_page: map doesn't need any pages");
59355505Sshin	map->pagesreserved--;
59455505Sshin
59555505Sshin	s = splhigh();
59655505Sshin	bpage = STAILQ_FIRST(&bounce_page_list);
59755505Sshin	if (bpage == NULL)
59855505Sshin		panic("add_bounce_page: free page list is empty");
59955505Sshin
60055505Sshin	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
60155505Sshin	reserved_bpages--;
60278064Sume	active_bpages++;
60378064Sume	splx(s);
60478064Sume
60578064Sume	bpage->datavaddr = vaddr;
60678064Sume	bpage->datacount = size;
60778064Sume	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
60878064Sume	return (bpage->busaddr);
60978064Sume}
610121156Sume
/*
 * Return a bounce page to the free pool.  If a map is blocked waiting
 * for bounce pages, try to complete its reservation; once fully
 * reserved, move it to the callback list and schedule the VM software
 * interrupt so busdma_swi() can finish its deferred load.
 */
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	int s;
	struct bus_dmamap *map;

	/* Forget the client data this page was shadowing. */
	bpage->datavaddr = 0;
	bpage->datacount = 0;

	s = splhigh();
	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		/* reserve_bounce_pages() returns 0 when fully satisfied. */
		if (reserve_bounce_pages(map->dmat, map) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			setsoftvm();
		}
	}
	splx(s);
}
635121156Sume
636122615Sumevoid
63781366Ssumikawabusdma_swi()
63881366Ssumikawa{
63981366Ssumikawa	int s;
640122615Sume	struct bus_dmamap *map;
641122615Sume
642122615Sume	s = splhigh();
64381366Ssumikawa	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
64466865Ssumikawa		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
64581366Ssumikawa		splx(s);
64666865Ssumikawa		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
64766865Ssumikawa				map->callback, map->callback_arg, /*flags*/0);
648253999Shrs		s = splhigh();
64955505Sshin	}
650253970Shrs	splx(s);
65155505Sshin}
65278064Sume