busdma_machdep.c revision 289701
1178172Simp/*-
2195162Simp * Copyright (c) 2006 Oleksandr Tymoshenko
3178172Simp * All rights reserved.
4178172Simp *
5178172Simp * Redistribution and use in source and binary forms, with or without
6178172Simp * modification, are permitted provided that the following conditions
7178172Simp * are met:
8178172Simp * 1. Redistributions of source code must retain the above copyright
9178172Simp *    notice, this list of conditions, and the following disclaimer,
10178172Simp *    without modification, immediately at the beginning of the file.
11178172Simp * 2. The name of the author may not be used to endorse or promote products
12178172Simp *    derived from this software without specific prior written permission.
13178172Simp *
14178172Simp * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15178172Simp * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16178172Simp * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17178172Simp * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
18178172Simp * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19178172Simp * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20178172Simp * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21178172Simp * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22178172Simp * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23178172Simp * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24178172Simp * SUCH DAMAGE.
25178172Simp *
26202046Simp *  From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
27178172Simp */
28178172Simp
29178172Simp#include <sys/cdefs.h>
30178172Simp__FBSDID("$FreeBSD: head/sys/mips/mips/busdma_machdep.c 289701 2015-10-21 15:06:48Z ian $");
31178172Simp
32202046Simp/*
33202046Simp * MIPS bus dma support routines
34202046Simp */
35202046Simp
36178172Simp#include <sys/param.h>
37178172Simp#include <sys/systm.h>
38178172Simp#include <sys/malloc.h>
39178172Simp#include <sys/bus.h>
40289701Sian#include <sys/busdma_bufalloc.h>
41178172Simp#include <sys/interrupt.h>
42178172Simp#include <sys/lock.h>
43178172Simp#include <sys/proc.h>
44246713Skib#include <sys/memdesc.h>
45178172Simp#include <sys/mutex.h>
46178172Simp#include <sys/ktr.h>
47178172Simp#include <sys/kernel.h>
48202046Simp#include <sys/sysctl.h>
49246713Skib#include <sys/uio.h>
50178172Simp
51289701Sian#include <vm/uma.h>
52178172Simp#include <vm/vm.h>
53289701Sian#include <vm/vm_extern.h>
54289701Sian#include <vm/vm_kern.h>
55178172Simp#include <vm/vm_page.h>
56178172Simp#include <vm/vm_map.h>
57178172Simp
58178172Simp#include <machine/atomic.h>
59178172Simp#include <machine/bus.h>
60178172Simp#include <machine/cache.h>
61178172Simp#include <machine/cpufunc.h>
62204689Sneel#include <machine/cpuinfo.h>
63202046Simp#include <machine/md_var.h>
64178172Simp
65202046Simp#define MAX_BPAGES 64
66202046Simp#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
67202046Simp#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4
68202046Simp
69202046Simpstruct bounce_zone;
70202046Simp
71178172Simpstruct bus_dma_tag {
72178172Simp	bus_dma_tag_t		parent;
73178172Simp	bus_size_t		alignment;
74232356Sjhb	bus_addr_t		boundary;
75178172Simp	bus_addr_t		lowaddr;
76178172Simp	bus_addr_t		highaddr;
77178172Simp	bus_dma_filter_t	*filter;
78178172Simp	void			*filterarg;
79178172Simp	bus_size_t		maxsize;
80178172Simp	u_int			nsegments;
81178172Simp	bus_size_t		maxsegsz;
82178172Simp	int			flags;
83178172Simp	int			ref_count;
84178172Simp	int			map_count;
85178172Simp	bus_dma_lock_t		*lockfunc;
86178172Simp	void			*lockfuncarg;
87240177Sjhb	bus_dma_segment_t	*segments;
88202046Simp	struct bounce_zone *bounce_zone;
89178172Simp};
90178172Simp
91202046Simpstruct bounce_page {
92202046Simp	vm_offset_t	vaddr;		/* kva of bounce buffer */
93202046Simp	vm_offset_t	vaddr_nocache;	/* kva of bounce buffer uncached */
94202046Simp	bus_addr_t	busaddr;	/* Physical address */
95202046Simp	vm_offset_t	datavaddr;	/* kva of client data */
96246713Skib	bus_addr_t	dataaddr;	/* client physical address */
97202046Simp	bus_size_t	datacount;	/* client data count */
98202046Simp	STAILQ_ENTRY(bounce_page) links;
99202046Simp};
100202046Simp
101246713Skibstruct sync_list {
102246713Skib	vm_offset_t	vaddr;		/* kva of bounce buffer */
103246713Skib	bus_addr_t	busaddr;	/* Physical address */
104246713Skib	bus_size_t	datacount;	/* client data count */
105246713Skib};
106246713Skib
107202046Simpint busdma_swi_pending;
108202046Simp
109202046Simpstruct bounce_zone {
110202046Simp	STAILQ_ENTRY(bounce_zone) links;
111202046Simp	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
112202046Simp	int		total_bpages;
113202046Simp	int		free_bpages;
114202046Simp	int		reserved_bpages;
115202046Simp	int		active_bpages;
116202046Simp	int		total_bounced;
117202046Simp	int		total_deferred;
118202046Simp	int		map_count;
119202046Simp	bus_size_t	alignment;
120202046Simp	bus_addr_t	lowaddr;
121202046Simp	char		zoneid[8];
122202046Simp	char		lowaddrid[20];
123202046Simp	struct sysctl_ctx_list sysctl_tree;
124202046Simp	struct sysctl_oid *sysctl_tree_top;
125202046Simp};
126202046Simp
127202046Simpstatic struct mtx bounce_lock;
128202046Simpstatic int total_bpages;
129202046Simpstatic int busdma_zonecount;
130202046Simpstatic STAILQ_HEAD(, bounce_zone) bounce_zone_list;
131202046Simp
132227309Sedstatic SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
133202046SimpSYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
134202046Simp	   "Total bounce pages");
135202046Simp
136289701Sian#define DMAMAP_UNCACHEABLE	0x08
137289701Sian#define DMAMAP_CACHE_ALIGNED	0x10
138202046Simp
139178172Simpstruct bus_dmamap {
140202046Simp	struct bp_list	bpages;
141202046Simp	int		pagesneeded;
142202046Simp	int		pagesreserved;
143212284Sjchandra	bus_dma_tag_t	dmat;
144246713Skib	struct memdesc	mem;
145178172Simp	int		flags;
146178172Simp	void		*origbuffer;
147178172Simp	void		*allocbuffer;
148178172Simp	TAILQ_ENTRY(bus_dmamap)	freelist;
149202046Simp	STAILQ_ENTRY(bus_dmamap) links;
150202046Simp	bus_dmamap_callback_t *callback;
151212284Sjchandra	void		*callback_arg;
152246713Skib	int		sync_count;
153246713Skib	struct sync_list *slist;
154178172Simp};
155178172Simp
156202046Simpstatic STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
157202046Simpstatic STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
158202046Simp
159202046Simpstatic void init_bounce_pages(void *dummy);
160202046Simpstatic int alloc_bounce_zone(bus_dma_tag_t dmat);
161202046Simpstatic int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
162202046Simpstatic int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
163202046Simp				int commit);
164202046Simpstatic bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
165246713Skib				  vm_offset_t vaddr, bus_addr_t addr,
166246713Skib				  bus_size_t size);
167202046Simpstatic void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
168202046Simp
169202046Simp/* Default tag, as most drivers provide no parent tag. */
170202046Simpbus_dma_tag_t mips_root_dma_tag;
171202046Simp
172289701Sianstatic uma_zone_t dmamap_zone;	/* Cache of struct bus_dmamap items */
173289701Sian
174289701Sianstatic busdma_bufalloc_t coherent_allocator;	/* Cache of coherent buffers */
175289701Sianstatic busdma_bufalloc_t standard_allocator;	/* Cache of standard buffers */
176289701Sian
177289701SianMALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");
178289701SianMALLOC_DEFINE(M_BOUNCE, "bounce", "busdma bounce pages");
179289701Sian
180202046Simp/*
181289701Sian * This is the ctor function passed to uma_zcreate() for the pool of dma maps.
182289701Sian * It'll need platform-specific changes if this code is copied.
183289701Sian */
184289701Sianstatic int
185289701Siandmamap_ctor(void *mem, int size, void *arg, int flags)
186289701Sian{
187289701Sian	bus_dmamap_t map;
188289701Sian	bus_dma_tag_t dmat;
189289701Sian
190289701Sian	map = (bus_dmamap_t)mem;
191289701Sian	dmat = (bus_dma_tag_t)arg;
192289701Sian
193289701Sian	dmat->map_count++;
194289701Sian
195289701Sian	map->dmat = dmat;
196289701Sian	map->flags = 0;
197289701Sian	map->slist = NULL;
198289701Sian	map->allocbuffer = NULL;
199289701Sian	map->sync_count = 0;
200289701Sian	STAILQ_INIT(&map->bpages);
201289701Sian
202289701Sian	return (0);
203289701Sian}
204289701Sian
205289701Sian/*
206289701Sian * This is the dtor function passed to uma_zcreate() for the pool of dma maps.
207289701Sian * It may need platform-specific changes if this code is copied.
208289701Sian */
209289701Sianstatic void
210289701Siandmamap_dtor(void *mem, int size, void *arg)
211289701Sian{
212289701Sian	bus_dmamap_t map;
213289701Sian
214289701Sian	map = (bus_dmamap_t)mem;
215289701Sian
216289701Sian	map->dmat->map_count--;
217289701Sian}
218289701Sian
219289701Sianstatic void
220289701Sianbusdma_init(void *dummy)
221289701Sian{
222289701Sian
223289701Sian	/* Create a cache of maps for bus_dmamap_create(). */
224289701Sian	dmamap_zone = uma_zcreate("dma maps", sizeof(struct bus_dmamap),
225289701Sian	    dmamap_ctor, dmamap_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);
226289701Sian
227289701Sian	/* Create a cache of buffers in standard (cacheable) memory. */
228289701Sian	standard_allocator = busdma_bufalloc_create("buffer",
229289701Sian	    mips_pdcache_linesize,	/* minimum_alignment */
230289701Sian	    NULL,			/* uma_alloc func */
231289701Sian	    NULL,			/* uma_free func */
232289701Sian	    0);				/* uma_zcreate_flags */
233289701Sian
234289701Sian	/*
235289701Sian	 * Create a cache of buffers in uncacheable memory, to implement the
236289701Sian	 * BUS_DMA_COHERENT flag.
237289701Sian	 */
238289701Sian	coherent_allocator = busdma_bufalloc_create("coherent",
239289701Sian	    mips_pdcache_linesize,	/* minimum_alignment */
240289701Sian	    busdma_bufalloc_alloc_uncacheable,
241289701Sian	    busdma_bufalloc_free_uncacheable,
242289701Sian	    0);				/* uma_zcreate_flags */
243289701Sian}
244289701SianSYSINIT(busdma, SI_SUB_KMEM, SI_ORDER_FOURTH, busdma_init, NULL);
245289701Sian
246289701Sian/*
247202046Simp * Return true if a match is made.
248202046Simp *
249202046Simp * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
250202046Simp *
251202046Simp * If paddr is within the bounds of the dma tag then call the filter callback
252202046Simp * to check for a match; if there is no filter callback then assume a match.
253202046Simp */
254202046Simpstatic int
255202046Simprun_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
256202046Simp{
257202046Simp	int retval;
258202046Simp
259202046Simp	retval = 0;
260202046Simp
261202046Simp	do {
262202046Simp		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
263202046Simp		 || ((paddr & (dmat->alignment - 1)) != 0))
264202046Simp		 && (dmat->filter == NULL
265202046Simp		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
266202046Simp			retval = 1;
267202046Simp
268202046Simp		dmat = dmat->parent;
269202046Simp	} while (retval == 0 && dmat != NULL);
270202046Simp	return (retval);
271202046Simp}
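
/*
 * Illustrative sketch (not part of the original file): a driver-supplied
 * filter has the bus_dma_filter_t signature consulted by run_filter() above.
 * The function name and address window are hypothetical; the example models
 * a device that cannot DMA above 256MB.  Returning non-zero tells busdma
 * that the page at 'paddr' must be bounced.
 *
 *	static int
 *	mydev_dma_filter(void *arg, bus_addr_t paddr)
 *	{
 *		return (paddr >= 0x10000000 ? 1 : 0);
 *	}
 *
 * The filter and its argument are installed via the filter/filterarg
 * parameters of bus_dma_tag_create() and are consulted, tag by tag up the
 * parent chain, for every candidate physical address.
 */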
272202046Simp
273178172Simp/*
274178172Simp * Check to see if the specified page is in an allowed DMA range.
275178172Simp */
276178172Simp
277178172Simpstatic __inline int
278202046Simp_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
279202046Simp{
280202046Simp	int i;
281202046Simp	for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
282202046Simp		if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1])
283202046Simp		    || (lowaddr < phys_avail[i] &&
284202046Simp		    highaddr > phys_avail[i]))
285202046Simp			return (1);
286202046Simp	}
287202046Simp	return (0);
288202046Simp}
289202046Simp
290178172Simp/*
291178172Simp * Convenience function for manipulating driver locks from busdma (during
292178172Simp * busdma_swi, for example).  Drivers that don't provide their own locks
293178172Simp * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
294178172Simp * non-mutex locking scheme don't have to use this at all.
295178172Simp */
296178172Simpvoid
297178172Simpbusdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
298178172Simp{
299178172Simp	struct mtx *dmtx;
300178172Simp
301178172Simp	dmtx = (struct mtx *)arg;
302178172Simp	switch (op) {
303178172Simp	case BUS_DMA_LOCK:
304178172Simp		mtx_lock(dmtx);
305178172Simp		break;
306178172Simp	case BUS_DMA_UNLOCK:
307178172Simp		mtx_unlock(dmtx);
308178172Simp		break;
309178172Simp	default:
310178172Simp		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
311178172Simp	}
312178172Simp}
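
/*
 * Usage note (hypothetical softc/mutex names): a driver that allows its
 * loads to be deferred passes this helper and its own mutex as the lockfunc
 * and lockfuncarg arguments of bus_dma_tag_create(), e.g.
 * "..., busdma_lock_mutex, &sc->sc_mtx, &sc->sc_dtag".  When a deferred
 * load completes, busdma_swi() calls the helper with BUS_DMA_LOCK and
 * BUS_DMA_UNLOCK around the callback so it runs with the driver lock held.
 */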
313178172Simp
314178172Simp/*
315178172Simp * dflt_lock should never get called.  It gets put into the dma tag when
316178172Simp * lockfunc == NULL, which is only valid if the maps that are associated
317178172Simp * with the tag are meant to never be deferred.
318178172Simp * XXX Should have a way to identify which driver is responsible here.
319178172Simp */
320178172Simpstatic void
321178172Simpdflt_lock(void *arg, bus_dma_lock_op_t op)
322178172Simp{
323178172Simp#ifdef INVARIANTS
324178172Simp	panic("driver error: busdma dflt_lock called");
325178172Simp#else
326178172Simp	printf("DRIVER_ERROR: busdma dflt_lock called\n");
327178172Simp#endif
328178172Simp}
329178172Simp
330178172Simpstatic __inline bus_dmamap_t
331246713Skib_busdma_alloc_dmamap(bus_dma_tag_t dmat)
332178172Simp{
333246713Skib	struct sync_list *slist;
334178172Simp	bus_dmamap_t map;
335178172Simp
336289701Sian	slist = malloc(sizeof(*slist) * dmat->nsegments, M_BUSDMA, M_NOWAIT);
337246713Skib	if (slist == NULL)
338246713Skib		return (NULL);
339289701Sian	map = uma_zalloc_arg(dmamap_zone, dmat, M_NOWAIT);
340289701Sian	if (map != NULL)
341246713Skib		map->slist = slist;
342289701Sian	else
343246713Skib		free(slist, M_BUSDMA);
344178172Simp	return (map);
345178172Simp}
346178172Simp
347178172Simpstatic __inline void
348178172Simp_busdma_free_dmamap(bus_dmamap_t map)
349178172Simp{
350246713Skib	free(map->slist, M_BUSDMA);
351289701Sian	uma_zfree(dmamap_zone, map);
352178172Simp}
353178172Simp
354202046Simp/*
355202046Simp * Allocate a device specific dma_tag.
356202046Simp */
357202046Simp#define SEG_NB 1024
358202046Simp
359178172Simpint
360178172Simpbus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
361232356Sjhb    bus_addr_t boundary, bus_addr_t lowaddr,
362212284Sjchandra    bus_addr_t highaddr, bus_dma_filter_t *filter,
363212284Sjchandra    void *filterarg, bus_size_t maxsize, int nsegments,
364212284Sjchandra    bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
365212284Sjchandra    void *lockfuncarg, bus_dma_tag_t *dmat)
366178172Simp{
367178172Simp	bus_dma_tag_t newtag;
368178172Simp	int error = 0;
369178172Simp	/* Return a NULL tag on failure */
370178172Simp	*dmat = NULL;
371202046Simp	if (!parent)
372202046Simp		parent = mips_root_dma_tag;
373178172Simp
374289701Sian	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_BUSDMA, M_NOWAIT);
375178172Simp	if (newtag == NULL) {
376178172Simp		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
377178172Simp		    __func__, newtag, 0, error);
378178172Simp		return (ENOMEM);
379178172Simp	}
380178172Simp
381178172Simp	newtag->parent = parent;
382178172Simp	newtag->alignment = alignment;
383178172Simp	newtag->boundary = boundary;
384202046Simp	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
385202046Simp	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
386178172Simp	newtag->filter = filter;
387178172Simp	newtag->filterarg = filterarg;
388212284Sjchandra	newtag->maxsize = maxsize;
389212284Sjchandra	newtag->nsegments = nsegments;
390178172Simp	newtag->maxsegsz = maxsegsz;
391178172Simp	newtag->flags = flags;
392204689Sneel	if (cpuinfo.cache_coherent_dma)
393204689Sneel		newtag->flags |= BUS_DMA_COHERENT;
394178172Simp	newtag->ref_count = 1; /* Count ourself */
395178172Simp	newtag->map_count = 0;
396178172Simp	if (lockfunc != NULL) {
397178172Simp		newtag->lockfunc = lockfunc;
398178172Simp		newtag->lockfuncarg = lockfuncarg;
399178172Simp	} else {
400178172Simp		newtag->lockfunc = dflt_lock;
401178172Simp		newtag->lockfuncarg = NULL;
402178172Simp	}
403240177Sjhb	newtag->segments = NULL;
404240177Sjhb
405212284Sjchandra	/*
406202046Simp	 * Take into account any restrictions imposed by our parent tag
407202046Simp	 */
408212284Sjchandra	if (parent != NULL) {
409232356Sjhb		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
410232356Sjhb		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
411178172Simp		if (newtag->boundary == 0)
412178172Simp			newtag->boundary = parent->boundary;
413178172Simp		else if (parent->boundary != 0)
414212284Sjchandra			newtag->boundary =
415232356Sjhb			    MIN(parent->boundary, newtag->boundary);
416202046Simp		if ((newtag->filter != NULL) ||
417202046Simp		    ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
418202046Simp			newtag->flags |= BUS_DMA_COULD_BOUNCE;
419212284Sjchandra		if (newtag->filter == NULL) {
420212284Sjchandra			/*
421212284Sjchandra			* Short circuit looking at our parent directly
422212284Sjchandra			* since we have encapsulated all of its information
423212284Sjchandra			*/
424212284Sjchandra			newtag->filter = parent->filter;
425212284Sjchandra			newtag->filterarg = parent->filterarg;
426212284Sjchandra			newtag->parent = parent->parent;
427178172Simp		}
428178172Simp		if (newtag->parent != NULL)
429178172Simp			atomic_add_int(&parent->ref_count, 1);
430178172Simp	}
431202046Simp	if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr)
432202046Simp	 || newtag->alignment > 1)
433202046Simp		newtag->flags |= BUS_DMA_COULD_BOUNCE;
434178172Simp
435202046Simp	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
436202046Simp	    (flags & BUS_DMA_ALLOCNOW) != 0) {
437202046Simp		struct bounce_zone *bz;
438202046Simp
439202046Simp		/* Must bounce */
440202046Simp
441202046Simp		if ((error = alloc_bounce_zone(newtag)) != 0) {
442202046Simp			free(newtag, M_BUSDMA);
443202046Simp			return (error);
444202046Simp		}
445202046Simp		bz = newtag->bounce_zone;
446202046Simp
447202046Simp		if (ptoa(bz->total_bpages) < maxsize) {
448202046Simp			int pages;
449202046Simp
450202046Simp			pages = atop(maxsize) - bz->total_bpages;
451202046Simp
452202046Simp			/* Add pages to our bounce pool */
453202046Simp			if (alloc_bounce_pages(newtag, pages) < pages)
454202046Simp				error = ENOMEM;
455202046Simp		}
456202046Simp		/* Performed initial allocation */
457202046Simp		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
458202046Simp	} else
459202046Simp		newtag->bounce_zone = NULL;
460202046Simp	if (error != 0)
461178172Simp		free(newtag, M_BUSDMA);
462202046Simp	else
463178172Simp		*dmat = newtag;
464178172Simp	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
465178172Simp	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
466202046Simp
467178172Simp	return (error);
468178172Simp}
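
/*
 * Creation sketch (hypothetical driver; names and limits are illustrative):
 * a tag for a device that can only address the low 16MB of physical memory
 * and needs 4-byte alignment could be created roughly as follows, passing
 * alignment 4, boundary 0, lowaddr 0x00ffffff, highaddr BUS_SPACE_MAXADDR,
 * no filter, a single segment of at most MYDEV_BUFSIZE bytes, no flags, and
 * the busdma_lock_mutex/driver-mutex pair described above:
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
 *	    0x00ffffff, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    MYDEV_BUFSIZE, 1, MYDEV_BUFSIZE, 0,
 *	    busdma_lock_mutex, &sc->sc_mtx, &sc->sc_dtag);
 *
 * Because the alignment is greater than 1 (and lowaddr may exclude part of
 * RAM), the code above marks such a tag BUS_DMA_COULD_BOUNCE, so maps
 * created from it may allocate and use bounce pages.
 */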
469178172Simp
470178172Simpint
471178172Simpbus_dma_tag_destroy(bus_dma_tag_t dmat)
472178172Simp{
473178172Simp#ifdef KTR
474178172Simp	bus_dma_tag_t dmat_copy = dmat;
475178172Simp#endif
476178172Simp
477178172Simp	if (dmat != NULL) {
478212284Sjchandra		if (dmat->map_count != 0)
479212284Sjchandra			return (EBUSY);
480178172Simp
481212284Sjchandra		while (dmat != NULL) {
482212284Sjchandra			bus_dma_tag_t parent;
483178172Simp
484212284Sjchandra			parent = dmat->parent;
485212284Sjchandra			atomic_subtract_int(&dmat->ref_count, 1);
486212284Sjchandra			if (dmat->ref_count == 0) {
487240177Sjhb				if (dmat->segments != NULL)
488240177Sjhb					free(dmat->segments, M_BUSDMA);
489212284Sjchandra				free(dmat, M_BUSDMA);
490212284Sjchandra				/*
491212284Sjchandra				 * Last reference count, so
492212284Sjchandra				 * release our reference
493212284Sjchandra				 * count on our parent.
494212284Sjchandra				 */
495212284Sjchandra				dmat = parent;
496212284Sjchandra			} else
497240177Sjhb				dmat = NULL;
498212284Sjchandra		}
499212284Sjchandra	}
500178172Simp	CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy);
501178172Simp
502212284Sjchandra	return (0);
503178172Simp}
504178172Simp
505202046Simp#include <sys/kdb.h>
506178172Simp/*
507178172Simp * Allocate a handle for mapping from kva/uva/physical
508178172Simp * address space into bus device space.
509178172Simp */
510178172Simpint
511178172Simpbus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
512178172Simp{
513178172Simp	bus_dmamap_t newmap;
514178172Simp	int error = 0;
515178172Simp
516240177Sjhb	if (dmat->segments == NULL) {
517240177Sjhb		dmat->segments = (bus_dma_segment_t *)malloc(
518289701Sian		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_BUSDMA,
519240177Sjhb		    M_NOWAIT);
520240177Sjhb		if (dmat->segments == NULL) {
521240177Sjhb			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
522240177Sjhb			    __func__, dmat, ENOMEM);
523240177Sjhb			return (ENOMEM);
524240177Sjhb		}
525240177Sjhb	}
526240177Sjhb
527246713Skib	newmap = _busdma_alloc_dmamap(dmat);
528178172Simp	if (newmap == NULL) {
529178172Simp		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
530178172Simp		return (ENOMEM);
531178172Simp	}
532178172Simp	*mapp = newmap;
533178172Simp
534202046Simp	/*
535202046Simp	 * Bouncing might be required if the driver asks for an active
536202046Simp	 * exclusion region, a data alignment that is stricter than 1, and/or
537202046Simp	 * an active address boundary.
538202046Simp	 */
539202046Simp	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
540202046Simp
541202046Simp		/* Must bounce */
542202046Simp		struct bounce_zone *bz;
543202046Simp		int maxpages;
544202046Simp
545202046Simp		if (dmat->bounce_zone == NULL) {
546202046Simp			if ((error = alloc_bounce_zone(dmat)) != 0) {
547202046Simp				_busdma_free_dmamap(newmap);
548202046Simp				*mapp = NULL;
549202046Simp				return (error);
550202046Simp			}
551202046Simp		}
552202046Simp		bz = dmat->bounce_zone;
553202046Simp
554202046Simp		/* Initialize the new map */
555202046Simp		STAILQ_INIT(&((*mapp)->bpages));
556202046Simp
557202046Simp		/*
558202046Simp		 * Attempt to add pages to our pool on a per-instance
559202046Simp		 * basis up to a sane limit.
560202046Simp		 */
561202046Simp		maxpages = MAX_BPAGES;
562202046Simp		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
563202046Simp		 || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
564202046Simp			int pages;
565202046Simp
566202046Simp			pages = MAX(atop(dmat->maxsize), 1);
567202046Simp			pages = MIN(maxpages - bz->total_bpages, pages);
568202046Simp			pages = MAX(pages, 1);
569202046Simp			if (alloc_bounce_pages(dmat, pages) < pages)
570202046Simp				error = ENOMEM;
571202046Simp
572202046Simp			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
573202046Simp				if (error == 0)
574202046Simp					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
575202046Simp			} else {
576202046Simp				error = 0;
577202046Simp			}
578202046Simp		}
579202046Simp		bz->map_count++;
580202046Simp	}
581202046Simp
582178172Simp	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
583178172Simp	    __func__, dmat, dmat->flags, error);
584178172Simp
585178172Simp	return (0);
586178172Simp}
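
/*
 * Mapping sketch built on this function (hypothetical names; error handling
 * elided).  bus_dmamap_load() is the machine-independent wrapper that, for a
 * kernel virtual buffer, ends up in _bus_dmamap_load_buffer() below, and the
 * callback receives the segment array assembled there:
 *
 *	static void
 *	mydev_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		if (error == 0)
 *			*(bus_addr_t *)arg = segs[0].ds_addr;
 *	}
 *
 *	bus_dmamap_create(sc->sc_dtag, 0, &sc->sc_map);
 *	bus_dmamap_load(sc->sc_dtag, sc->sc_map, sc->sc_buf, MYDEV_BUFSIZE,
 *	    mydev_load_cb, &sc->sc_busaddr, BUS_DMA_NOWAIT);
 */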
587178172Simp
588178172Simp/*
589178172Simp * Destroy a handle for mapping from kva/uva/physical
590178172Simp * address space into bus device space.
591178172Simp */
592178172Simpint
593178172Simpbus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
594178172Simp{
595202046Simp
596246713Skib	if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
597202046Simp		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
598202046Simp		    __func__, dmat, EBUSY);
599202046Simp		return (EBUSY);
600202046Simp	}
601202046Simp	if (dmat->bounce_zone)
602202046Simp		dmat->bounce_zone->map_count--;
603242465Sadrian	_busdma_free_dmamap(map);
604178172Simp	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
605178172Simp        return (0);
606178172Simp}
607178172Simp
608178172Simp/*
609178172Simp * Allocate a piece of memory that can be efficiently mapped into
610178172Simp * bus device space based on the constraints listed in the dma tag.
611178172Simp * A dmamap for use with bus_dmamap_load is also allocated.
612178172Simp */
613178172Simpint
614289701Sianbus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddrp, int flags,
615212284Sjchandra    bus_dmamap_t *mapp)
616178172Simp{
617178172Simp	bus_dmamap_t newmap = NULL;
618289701Sian	busdma_bufalloc_t ba;
619289701Sian	struct busdma_bufzone *bufzone;
620289701Sian	vm_memattr_t memattr;
621289701Sian	void *vaddr;
622178172Simp
623178172Simp	int mflags;
624178172Simp
625178172Simp	if (flags & BUS_DMA_NOWAIT)
626178172Simp		mflags = M_NOWAIT;
627178172Simp	else
628178172Simp		mflags = M_WAITOK;
629240177Sjhb	if (dmat->segments == NULL) {
630240177Sjhb		dmat->segments = (bus_dma_segment_t *)malloc(
631289701Sian		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_BUSDMA,
632240177Sjhb		    mflags);
633240177Sjhb		if (dmat->segments == NULL) {
634240177Sjhb			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
635240177Sjhb			    __func__, dmat, dmat->flags, ENOMEM);
636240177Sjhb			return (ENOMEM);
637240177Sjhb		}
638240177Sjhb	}
639178172Simp
640246713Skib	newmap = _busdma_alloc_dmamap(dmat);
641178172Simp	if (newmap == NULL) {
642178172Simp		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
643178172Simp		    __func__, dmat, dmat->flags, ENOMEM);
644178172Simp		return (ENOMEM);
645178172Simp	}
646202046Simp
647204689Sneel	/*
648204689Sneel	 * If all the memory is coherent with DMA then we don't need to
649204689Sneel	 * do anything special for a coherent mapping request.
650204689Sneel	 */
651204689Sneel	if (dmat->flags & BUS_DMA_COHERENT)
652204689Sneel	    flags &= ~BUS_DMA_COHERENT;
653204689Sneel
654289701Sian	if (flags & BUS_DMA_COHERENT) {
655289701Sian		memattr = VM_MEMATTR_UNCACHEABLE;
656289701Sian		ba = coherent_allocator;
657289701Sian		newmap->flags |= DMAMAP_UNCACHEABLE;
658289701Sian	} else {
659289701Sian		memattr = VM_MEMATTR_DEFAULT;
660289701Sian		ba = standard_allocator;
661289701Sian	}
662289701Sian	/* All buffers we allocate are cache-aligned. */
663289701Sian	newmap->flags |= DMAMAP_CACHE_ALIGNED;
664289701Sian
665289701Sian	if (flags & BUS_DMA_ZERO)
666289701Sian		mflags |= M_ZERO;
667289701Sian
668204689Sneel	/*
669289701Sian	 * Try to find a bufzone in the allocator that holds a cache of buffers
670289701Sian	 * of the right size for this request.  If the buffer is too big to be
671289701Sian	 * held in the allocator cache, this returns NULL.
672204689Sneel	 */
673289701Sian	bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);
674204689Sneel
675289701Sian	/*
676289701Sian	 * Allocate the buffer from the uma(9) allocator if...
677289701Sian	 *  - It's small enough to be in the allocator (bufzone not NULL).
678289701Sian	 *  - The alignment constraint isn't larger than the allocation size
679289701Sian	 *    (the allocator aligns buffers to their size boundaries).
680289701Sian	 *  - There's no need to handle lowaddr/highaddr exclusion zones.
681289701Sian	 * else allocate non-contiguous pages if...
682289701Sian	 *  - The page count that could get allocated doesn't exceed nsegments.
683289701Sian	 *  - The alignment constraint isn't larger than a page boundary.
684289701Sian	 *  - There are no boundary-crossing constraints.
685289701Sian	 * else allocate a block of contiguous pages because one or more of the
686289701Sian	 * constraints is something that only the contig allocator can fulfill.
687289701Sian	 */
688289701Sian	if (bufzone != NULL && dmat->alignment <= bufzone->size &&
689289701Sian	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) {
690289701Sian		vaddr = uma_zalloc(bufzone->umazone, mflags);
691289701Sian	} else if (dmat->nsegments >= btoc(dmat->maxsize) &&
692289701Sian	    dmat->alignment <= PAGE_SIZE && dmat->boundary == 0) {
693289701Sian		vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
694289701Sian		    mflags, 0, dmat->lowaddr, memattr);
695212284Sjchandra	} else {
696289701Sian		vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize,
697289701Sian		    mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,
698289701Sian		    memattr);
699212284Sjchandra	}
700289701Sian	if (vaddr == NULL) {
701289701Sian		_busdma_free_dmamap(newmap);
702289701Sian		newmap = NULL;
703289701Sian	} else {
704289701Sian		newmap->sync_count = 0;
705178172Simp	}
706289701Sian	*vaddrp = vaddr;
707289701Sian	*mapp = newmap;
708202046Simp
709289701Sian	return (vaddr == NULL ? ENOMEM : 0);
710178172Simp}
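
/*
 * Allocation sketch for a small coherent descriptor ring (hypothetical
 * names; error handling elided).  With BUS_DMA_COHERENT on hardware that is
 * not already cache-coherent, the memory comes from the coherent_allocator
 * or uncacheable kmem path above and the map is marked DMAMAP_UNCACHEABLE,
 * so the cache-maintenance part of bus_dmamap_sync() is skipped for it:
 *
 *	void *ring;
 *	bus_dmamap_t ringmap;
 *
 *	bus_dmamem_alloc(sc->sc_ring_dtag, &ring,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &ringmap);
 *	bus_dmamap_load(sc->sc_ring_dtag, ringmap, ring, MYDEV_RING_BYTES,
 *	    mydev_load_cb, &sc->sc_ring_busaddr, BUS_DMA_NOWAIT);
 */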
711178172Simp
712178172Simp/*
713178172Simp * Free a piece of memory and its allocated dmamap, which were allocated via
714178172Simp * bus_dmamem_alloc.  The free path mirrors the allocation choice made there.
715178172Simp */
716178172Simpvoid
717178172Simpbus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
718178172Simp{
719289701Sian	struct busdma_bufzone *bufzone;
720289701Sian	busdma_bufalloc_t ba;
721202046Simp
722212283Sjchandra	if (map->flags & DMAMAP_UNCACHEABLE)
723289701Sian		ba = coherent_allocator;
724212284Sjchandra	else
725289701Sian		ba = standard_allocator;
726202046Simp
727289701Sian	free(map->slist, M_BUSDMA);
728289701Sian	uma_zfree(dmamap_zone, map);
729289701Sian
730289701Sian	bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);
731289701Sian
732289701Sian	if (bufzone != NULL && dmat->alignment <= bufzone->size &&
733289701Sian	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
734289701Sian		uma_zfree(bufzone->umazone, vaddr);
735289701Sian	else
736289701Sian		kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize);
737178172Simp	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
738202046Simp}
739178172Simp
740246713Skibstatic void
741246713Skib_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
742246713Skib    bus_size_t buflen, int flags)
743246713Skib{
744246713Skib	bus_addr_t curaddr;
745246713Skib	bus_size_t sgsize;
746246713Skib
747246713Skib	if ((map->pagesneeded == 0)) {
748246713Skib		CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
749246713Skib		    dmat->lowaddr, dmat->boundary, dmat->alignment);
750246713Skib		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
751246713Skib		    map, map->pagesneeded);
752246713Skib		/*
753246713Skib		 * Count the number of bounce pages
754246713Skib		 * needed in order to complete this transfer
755246713Skib		 */
756246713Skib		curaddr = buf;
757246713Skib		while (buflen != 0) {
758246713Skib			sgsize = MIN(buflen, dmat->maxsegsz);
759246713Skib			if (run_filter(dmat, curaddr) != 0) {
760246713Skib				sgsize = MIN(sgsize, PAGE_SIZE);
761246713Skib				map->pagesneeded++;
762246713Skib			}
763246713Skib			curaddr += sgsize;
764246713Skib			buflen -= sgsize;
765246713Skib		}
766246713Skib		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
767246713Skib	}
768246713Skib}
769246713Skib
770246713Skibstatic void
771202046Simp_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
772202046Simp    void *buf, bus_size_t buflen, int flags)
773202046Simp{
774202046Simp	vm_offset_t vaddr;
775202046Simp	vm_offset_t vendaddr;
776202046Simp	bus_addr_t paddr;
777202046Simp
778202046Simp	if ((map->pagesneeded == 0)) {
779202046Simp		CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
780202046Simp		    dmat->lowaddr, dmat->boundary, dmat->alignment);
781202046Simp		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
782202046Simp		    map, map->pagesneeded);
783202046Simp		/*
784202046Simp		 * Count the number of bounce pages
785202046Simp		 * needed in order to complete this transfer
786202046Simp		 */
787206405Snwhitehorn		vaddr = (vm_offset_t)buf;
788202046Simp		vendaddr = (vm_offset_t)buf + buflen;
789202046Simp
790202046Simp		while (vaddr < vendaddr) {
791206405Snwhitehorn			bus_size_t sg_len;
792206405Snwhitehorn
793202046Simp			KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
794206405Snwhitehorn			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
795202046Simp			paddr = pmap_kextract(vaddr);
796202046Simp			if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
797206405Snwhitehorn			    run_filter(dmat, paddr) != 0) {
798206405Snwhitehorn				sg_len = roundup2(sg_len, dmat->alignment);
799202046Simp				map->pagesneeded++;
800206405Snwhitehorn			}
801206405Snwhitehorn			vaddr += sg_len;
802202046Simp		}
803202046Simp		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
804202046Simp	}
805246713Skib}
806202046Simp
807246713Skibstatic int
808246713Skib_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,int flags)
809246713Skib{
810246713Skib
811202046Simp	/* Reserve Necessary Bounce Pages */
812246713Skib	mtx_lock(&bounce_lock);
813246713Skib	if (flags & BUS_DMA_NOWAIT) {
814246713Skib		if (reserve_bounce_pages(dmat, map, 0) != 0) {
815246713Skib			mtx_unlock(&bounce_lock);
816246713Skib			return (ENOMEM);
817202046Simp		}
818246713Skib	} else {
819246713Skib		if (reserve_bounce_pages(dmat, map, 1) != 0) {
820246713Skib			/* Queue us for resources */
821246713Skib			STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
822246713Skib			    map, links);
823246713Skib			mtx_unlock(&bounce_lock);
824246713Skib			return (EINPROGRESS);
825246713Skib		}
826202046Simp	}
827246713Skib	mtx_unlock(&bounce_lock);
828202046Simp
829202046Simp	return (0);
830178172Simp}
831178172Simp
832178172Simp/*
833246713Skib * Add a single contiguous physical range to the segment list.
834246713Skib */
835246713Skibstatic int
836246713Skib_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
837246713Skib    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
838246713Skib{
839246713Skib	bus_addr_t baddr, bmask;
840246713Skib	int seg;
841246713Skib
842246713Skib	/*
843246713Skib	 * Make sure we don't cross any boundaries.
844246713Skib	 */
845246713Skib	bmask = ~(dmat->boundary - 1);
846246713Skib	if (dmat->boundary > 0) {
847246713Skib		baddr = (curaddr + dmat->boundary) & bmask;
848246713Skib		if (sgsize > (baddr - curaddr))
849246713Skib			sgsize = (baddr - curaddr);
850246713Skib	}
851246713Skib	/*
852246713Skib	 * Insert chunk into a segment, coalescing with
853246713Skib	 * the previous segment if possible.
854246713Skib	 */
855246713Skib	seg = *segp;
856246713Skib	if (seg >= 0 &&
857246713Skib	    curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
858246713Skib	    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
859246713Skib	    (dmat->boundary == 0 ||
860246713Skib	     (segs[seg].ds_addr & bmask) == (curaddr & bmask))) {
861246713Skib		segs[seg].ds_len += sgsize;
862246713Skib	} else {
863246713Skib		if (++seg >= dmat->nsegments)
864246713Skib			return (0);
865246713Skib		segs[seg].ds_addr = curaddr;
866246713Skib		segs[seg].ds_len = sgsize;
867246713Skib	}
868246713Skib	*segp = seg;
869246713Skib	return (sgsize);
870246713Skib}
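
/*
 * Worked example of the boundary clipping above (hypothetical numbers):
 * with dmat->boundary = 0x1000, curaddr = 0xffe0 and sgsize = 0x100,
 * bmask = ~0xfff and baddr = (0xffe0 + 0x1000) & bmask = 0x10000, so sgsize
 * is clipped to baddr - curaddr = 0x20 and the remaining 0xe0 bytes are
 * handled on the next call, starting a new segment.  Coalescing with the
 * previous segment only happens when the chunk is physically contiguous
 * with it, the combined length fits within maxsegsz, and both ends stay
 * inside the same boundary window.
 */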
871246713Skib
872246713Skib/*
873246713Skib * Utility function to load a physical buffer.  segp contains
874246713Skib * the starting segment on entrance, and the ending segment on exit.
875246713Skib */
876246713Skibint
877246713Skib_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
878246713Skib    vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
879246713Skib    int *segp)
880246713Skib{
881246713Skib	bus_addr_t curaddr;
882246713Skib	bus_size_t sgsize;
883246713Skib	int error;
884246713Skib
885246713Skib	if (segs == NULL)
886246713Skib		segs = dmat->segments;
887246713Skib
888246713Skib	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
889246713Skib		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
890246713Skib		if (map->pagesneeded != 0) {
891246713Skib			error = _bus_dmamap_reserve_pages(dmat, map, flags);
892246713Skib			if (error)
893246713Skib				return (error);
894246713Skib		}
895246713Skib	}
896246713Skib
897246713Skib	while (buflen > 0) {
898246713Skib		curaddr = buf;
899246713Skib		sgsize = MIN(buflen, dmat->maxsegsz);
900246713Skib		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
901246713Skib		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
902246713Skib			sgsize = MIN(sgsize, PAGE_SIZE);
903246713Skib			curaddr = add_bounce_page(dmat, map, 0, curaddr,
904246713Skib			    sgsize);
905246713Skib		}
906246713Skib		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
907246713Skib		    segp);
908246713Skib		if (sgsize == 0)
909246713Skib			break;
910246713Skib		buf += sgsize;
911246713Skib		buflen -= sgsize;
912246713Skib	}
913246713Skib
914246713Skib	/*
915246713Skib	 * Did we fit?
916246713Skib	 */
917246713Skib	if (buflen != 0) {
918246713Skib		_bus_dmamap_unload(dmat, map);
919246713Skib		return (EFBIG); /* XXX better return value here? */
920246713Skib	}
921246713Skib	return (0);
922246713Skib}
923246713Skib
924257228Skibint
925257228Skib_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
926257228Skib    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
927257228Skib    bus_dma_segment_t *segs, int *segp)
928257228Skib{
929257228Skib
930257228Skib	return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
931257228Skib	    segs, segp));
932257228Skib}
933257228Skib
934246713Skib/*
935246713Skib * Utility function to load a linear buffer.  segp contains
936178172Simp * the starting segment on entrance, and the ending segment on exit.
938178172Simp */
939246713Skibint
940246713Skib_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
941246713Skib    bus_size_t buflen, struct pmap *pmap, int flags, bus_dma_segment_t *segs,
942246713Skib    int *segp)
943178172Simp{
944178172Simp	bus_size_t sgsize;
945246713Skib	bus_addr_t curaddr;
946246713Skib	struct sync_list *sl;
947178172Simp	vm_offset_t vaddr = (vm_offset_t)buf;
948178172Simp	int error = 0;
949178172Simp
950178172Simp
951246713Skib	if (segs == NULL)
952246713Skib		segs = dmat->segments;
953246713Skib
954202046Simp	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
955246713Skib		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
956246713Skib		if (map->pagesneeded != 0) {
957246713Skib			error = _bus_dmamap_reserve_pages(dmat, map, flags);
958246713Skib			if (error)
959246713Skib				return (error);
960246713Skib		}
961202046Simp	}
962202046Simp	CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
963202046Simp	    "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);
964202046Simp
965246713Skib	while (buflen > 0) {
966178172Simp		/*
967178172Simp		 * Get the physical address for this segment.
968202046Simp		 *
969202046Simp		 * XXX Don't support checking for coherent mappings
970202046Simp		 * XXX in user address space.
971178172Simp		 */
972178172Simp		KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
973178172Simp		curaddr = pmap_kextract(vaddr);
974178172Simp
975178172Simp		/*
976178172Simp		 * Compute the segment size, and adjust counts.
977178172Simp		 */
978178172Simp		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
979202046Simp		if (sgsize > dmat->maxsegsz)
980202046Simp			sgsize = dmat->maxsegsz;
981178172Simp		if (buflen < sgsize)
982178172Simp			sgsize = buflen;
983178172Simp
984202046Simp		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
985202046Simp		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
986246713Skib			curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
987246713Skib			    sgsize);
988178172Simp		} else {
989246713Skib			sl = &map->slist[map->sync_count - 1];
990246713Skib			if (map->sync_count == 0 ||
991246713Skib			    vaddr != sl->vaddr + sl->datacount) {
992246713Skib				if (++map->sync_count > dmat->nsegments)
993246713Skib					goto cleanup;
994246713Skib				sl++;
995246713Skib				sl->vaddr = vaddr;
996246713Skib				sl->datacount = sgsize;
997246713Skib				sl->busaddr = curaddr;
998246713Skib			} else
999246713Skib				sl->datacount += sgsize;
1000178172Simp		}
1001246713Skib		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
1002246713Skib		    segp);
1003246713Skib		if (sgsize == 0)
1004178172Simp			break;
1005178172Simp		vaddr += sgsize;
1006178172Simp		buflen -= sgsize;
1007178172Simp	}
1008178172Simp
1009246713Skibcleanup:
1010178172Simp	/*
1011178172Simp	 * Did we fit?
1012178172Simp	 */
1013246713Skib	if (buflen != 0) {
1014246713Skib		_bus_dmamap_unload(dmat, map);
1015202046Simp		error = EFBIG; /* XXX better return value here? */
1016246713Skib	}
1017202046Simp	return (error);
1018178172Simp}
1019178172Simp
1020246713Skibvoid
1021246713Skib__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
1022246713Skib    struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
1023178172Simp{
1024178172Simp
1025178172Simp	KASSERT(dmat != NULL, ("dmatag is NULL"));
1026178172Simp	KASSERT(map != NULL, ("dmamap is NULL"));
1027246713Skib	map->mem = *mem;
1028202046Simp	map->callback = callback;
1029202046Simp	map->callback_arg = callback_arg;
1030178172Simp}
1031178172Simp
1032246713Skibbus_dma_segment_t *
1033246713Skib_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
1034246713Skib    bus_dma_segment_t *segs, int nsegs, int error)
1035178172Simp{
1036178172Simp
1037246713Skib	if (segs == NULL)
1038246713Skib		segs = dmat->segments;
1039246713Skib	return (segs);
1040178172Simp}
1041178172Simp
1042178172Simp/*
1043178172Simp * Release the mapping held by map.
1044178172Simp */
1045178172Simpvoid
1046178172Simp_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
1047178172Simp{
1048202046Simp	struct bounce_page *bpage;
1049178172Simp
1050202046Simp	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
1051202046Simp		STAILQ_REMOVE_HEAD(&map->bpages, links);
1052202046Simp		free_bounce_page(dmat, bpage);
1053202046Simp	}
1054246713Skib	map->sync_count = 0;
1055178172Simp	return;
1056178172Simp}
1057178172Simp
1058202046Simpstatic void
1059289701Sianbus_dmamap_sync_buf(vm_offset_t buf, int len, bus_dmasync_op_t op, int aligned)
1060178172Simp{
1061202046Simp	char tmp_cl[mips_pdcache_linesize], tmp_clend[mips_pdcache_linesize];
1062202046Simp	vm_offset_t buf_cl, buf_clend;
1063202046Simp	vm_size_t size_cl, size_clend;
1064202046Simp	int cache_linesize_mask = mips_pdcache_linesize - 1;
1065178172Simp
1066202046Simp	/*
1067202046Simp	 * dcache invalidation operates on cache line aligned addresses
1068202046Simp	 * and could modify areas of memory that share the same cache line
1069202046Simp	 * at the beginning and the end of the buffer.  In order to prevent
1070202046Simp	 * data loss we save these chunks in a temporary buffer before the
1071289701Sian	 * invalidation and restore them after it.
1072289701Sian	 *
1073289701Sian	 * If the aligned flag is set, the buffer came from our allocator caches,
1074289701Sian	 * which are always sized and aligned to cache line boundaries, so we can
1075289701Sian	 * skip preserving nearby data even when a transfer does not begin or end
1076289701Sian	 * exactly on a cache line boundary.
1077202046Simp	 */
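	/*
	 * Worked example with hypothetical values: with a 32-byte line size,
	 * buf = 0x80100022 and len = 0x30 give buf_cl = 0x80100020,
	 * size_cl = 2, buf_clend = 0x80100052 and size_clend = 14, i.e. the
	 * two bytes before the buffer and the fourteen bytes after it share
	 * cache lines with it and must be preserved across an invalidation.
	 */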
1078289701Sian	if (aligned) {
1079289701Sian		size_cl = 0;
1080289701Sian		size_clend = 0;
1081289701Sian	} else {
1082289701Sian		buf_cl = buf & ~cache_linesize_mask;
1083289701Sian		size_cl = buf & cache_linesize_mask;
1084289701Sian		buf_clend = buf + len;
1085289701Sian		size_clend = (mips_pdcache_linesize -
1086289701Sian		    (buf_clend & cache_linesize_mask)) & cache_linesize_mask;
1087289701Sian	}
1088202046Simp
1089178172Simp	switch (op) {
1090202046Simp	case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
1091202046Simp	case BUS_DMASYNC_POSTREAD:
1092202046Simp
1093202046Simp		/*
1094202046Simp		 * Save buffers that might be modified by invalidation
1095202046Simp		 */
1096202046Simp		if (size_cl)
1097202046Simp			memcpy (tmp_cl, (void*)buf_cl, size_cl);
1098202046Simp		if (size_clend)
1099202046Simp			memcpy (tmp_clend, (void*)buf_clend, size_clend);
1100246713Skib		mips_dcache_inv_range(buf, len);
1101202046Simp		/*
1102202046Simp		 * Restore them
1103202046Simp		 */
1104202046Simp		if (size_cl)
1105202046Simp			memcpy ((void*)buf_cl, tmp_cl, size_cl);
1106202046Simp		if (size_clend)
1107202046Simp			memcpy ((void*)buf_clend, tmp_clend, size_clend);
1108203080Skan		/*
1109203080Skan		 * Copies above have brought corresponding memory
1110203080Skan		 * cache lines back into dirty state. Write them back
1111203080Skan		 * out and invalidate affected cache lines again if
1112203080Skan		 * necessary.
1113203080Skan		 */
1114203080Skan		if (size_cl)
1115246713Skib			mips_dcache_wbinv_range(buf_cl, size_cl);
1116203080Skan		if (size_clend && (size_cl == 0 ||
1117203080Skan                    buf_clend - buf_cl > mips_pdcache_linesize))
1118246713Skib			mips_dcache_wbinv_range(buf_clend, size_clend);
1119202046Simp		break;
1120202046Simp
1121178172Simp	case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
1122246713Skib		mips_dcache_wbinv_range(buf, len);
1123178172Simp		break;
1124178172Simp
1125178172Simp	case BUS_DMASYNC_PREREAD:
1126202046Simp		/*
1127202046Simp		 * Save buffers that might be modified by invalidation
1128202046Simp		 */
1129202046Simp		if (size_cl)
1130202046Simp			memcpy (tmp_cl, (void *)buf_cl, size_cl);
1131202046Simp		if (size_clend)
1132202046Simp			memcpy (tmp_clend, (void *)buf_clend, size_clend);
1133246713Skib		mips_dcache_inv_range(buf, len);
1134202046Simp		/*
1135202046Simp		 * Restore them
1136202046Simp		 */
1137202046Simp		if (size_cl)
1138202046Simp			memcpy ((void *)buf_cl, tmp_cl, size_cl);
1139202046Simp		if (size_clend)
1140202046Simp			memcpy ((void *)buf_clend, tmp_clend, size_clend);
1141203080Skan		/*
1142203080Skan		 * Copies above have brought corresponding memory
1143203080Skan		 * cache lines back into dirty state. Write them back
1144203080Skan		 * out and invalidate affected cache lines again if
1145203080Skan		 * necessary.
1146203080Skan		 */
1147203080Skan		if (size_cl)
1148246713Skib			mips_dcache_wbinv_range(buf_cl, size_cl);
1149203080Skan		if (size_clend && (size_cl == 0 ||
1150203080Skan                    buf_clend - buf_cl > mips_pdcache_linesize))
1151246713Skib			mips_dcache_wbinv_range(buf_clend, size_clend);
1152178172Simp		break;
1153178172Simp
1154178172Simp	case BUS_DMASYNC_PREWRITE:
1155246713Skib		mips_dcache_wb_range(buf, len);
1156178172Simp		break;
1157178172Simp	}
1158178172Simp}
1159178172Simp
1160202046Simpstatic void
1161202046Simp_bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
1162202046Simp{
1163202046Simp	struct bounce_page *bpage;
1164202046Simp
1165202046Simp	STAILQ_FOREACH(bpage, &map->bpages, links) {
1166202046Simp		if (op & BUS_DMASYNC_PREWRITE) {
1167246713Skib			if (bpage->datavaddr != 0)
1168246713Skib				bcopy((void *)bpage->datavaddr,
1169246713Skib				    (void *)(bpage->vaddr_nocache != 0 ?
1170246713Skib					     bpage->vaddr_nocache :
1171246713Skib					     bpage->vaddr),
1172246713Skib				    bpage->datacount);
1173246713Skib			else
1174246713Skib				physcopyout(bpage->dataaddr,
1175246713Skib				    (void *)(bpage->vaddr_nocache != 0 ?
1176246713Skib					     bpage->vaddr_nocache :
1177246713Skib					     bpage->vaddr),
1178246713Skib				    bpage->datacount);
1179202046Simp			if (bpage->vaddr_nocache == 0) {
1180202046Simp				mips_dcache_wb_range(bpage->vaddr,
1181202046Simp				    bpage->datacount);
1182202046Simp			}
1183202046Simp			dmat->bounce_zone->total_bounced++;
1184202046Simp		}
1185202046Simp		if (op & BUS_DMASYNC_POSTREAD) {
1186202046Simp			if (bpage->vaddr_nocache == 0) {
1187202046Simp				mips_dcache_inv_range(bpage->vaddr,
1188202046Simp				    bpage->datacount);
1189202046Simp			}
1190246713Skib			if (bpage->datavaddr != 0)
1191246713Skib				bcopy((void *)(bpage->vaddr_nocache != 0 ?
1192246713Skib				    bpage->vaddr_nocache : bpage->vaddr),
1193246713Skib				    (void *)bpage->datavaddr, bpage->datacount);
1194246713Skib			else
1195246713Skib				physcopyin((void *)(bpage->vaddr_nocache != 0 ?
1196246713Skib				    bpage->vaddr_nocache : bpage->vaddr),
1197246713Skib				    bpage->dataaddr, bpage->datacount);
1198202046Simp			dmat->bounce_zone->total_bounced++;
1199202046Simp		}
1200202046Simp	}
1201202046Simp}
1202202046Simp
1203178172Simpvoid
1204178172Simp_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
1205178172Simp{
1206246713Skib	struct sync_list *sl, *end;
1207289701Sian	int aligned;
1208178172Simp
1209202046Simp	if (op == BUS_DMASYNC_POSTWRITE)
1210178172Simp		return;
1211202046Simp	if (STAILQ_FIRST(&map->bpages))
1212202046Simp		_bus_dmamap_sync_bp(dmat, map, op);
1213204689Sneel
1214204689Sneel	if (dmat->flags & BUS_DMA_COHERENT)
1215202046Simp		return;
1216204689Sneel
1217204689Sneel	if (map->flags & DMAMAP_UNCACHEABLE)
1218204689Sneel		return;
1219204689Sneel
1220289701Sian	aligned = (map->flags & DMAMAP_CACHE_ALIGNED) ? 1 : 0;
1221289701Sian
1222178172Simp	CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
1223246713Skib	if (map->sync_count) {
1224246713Skib		end = &map->slist[map->sync_count];
1225246713Skib		for (sl = &map->slist[0]; sl != end; sl++)
1226289701Sian			bus_dmamap_sync_buf(sl->vaddr, sl->datacount, op,
1227289701Sian			    aligned);
1228178172Simp	}
1229178172Simp}
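
/*
 * Usage sketch (hypothetical device): for a buffer the device will write
 * into, a driver brackets the transfer as shown below, so the pre-op
 * invalidates stale cache lines and the post-op (which also copies any
 * bounced data back) makes the device's data visible to the CPU:
 *
 *	bus_dmamap_sync(sc->sc_dtag, sc->sc_map, BUS_DMASYNC_PREREAD);
 *	start the DMA and wait for the completion interrupt, then:
 *	bus_dmamap_sync(sc->sc_dtag, sc->sc_map, BUS_DMASYNC_POSTREAD);
 *	bus_dmamap_unload(sc->sc_dtag, sc->sc_map);
 */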
1230202046Simp
1231202046Simpstatic void
1232202046Simpinit_bounce_pages(void *dummy __unused)
1233202046Simp{
1234202046Simp
1235202046Simp	total_bpages = 0;
1236202046Simp	STAILQ_INIT(&bounce_zone_list);
1237202046Simp	STAILQ_INIT(&bounce_map_waitinglist);
1238202046Simp	STAILQ_INIT(&bounce_map_callbacklist);
1239202046Simp	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
1240202046Simp}
1241202046SimpSYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);
1242202046Simp
1243202046Simpstatic struct sysctl_ctx_list *
1244202046Simpbusdma_sysctl_tree(struct bounce_zone *bz)
1245202046Simp{
1246202046Simp	return (&bz->sysctl_tree);
1247202046Simp}
1248202046Simp
1249202046Simpstatic struct sysctl_oid *
1250202046Simpbusdma_sysctl_tree_top(struct bounce_zone *bz)
1251202046Simp{
1252202046Simp	return (bz->sysctl_tree_top);
1253202046Simp}
1254202046Simp
1255202046Simpstatic int
1256202046Simpalloc_bounce_zone(bus_dma_tag_t dmat)
1257202046Simp{
1258202046Simp	struct bounce_zone *bz;
1259202046Simp
1260202046Simp	/* Check to see if we already have a suitable zone */
1261202046Simp	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
1262202046Simp		if ((dmat->alignment <= bz->alignment)
1263202046Simp		 && (dmat->lowaddr >= bz->lowaddr)) {
1264202046Simp			dmat->bounce_zone = bz;
1265202046Simp			return (0);
1266202046Simp		}
1267202046Simp	}
1268202046Simp
1269289701Sian	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_BUSDMA,
1270202046Simp	    M_NOWAIT | M_ZERO)) == NULL)
1271202046Simp		return (ENOMEM);
1272202046Simp
1273202046Simp	STAILQ_INIT(&bz->bounce_page_list);
1274202046Simp	bz->free_bpages = 0;
1275202046Simp	bz->reserved_bpages = 0;
1276202046Simp	bz->active_bpages = 0;
1277202046Simp	bz->lowaddr = dmat->lowaddr;
1278202046Simp	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
1279202046Simp	bz->map_count = 0;
1280202046Simp	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
1281202046Simp	busdma_zonecount++;
1282202046Simp	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
1283202046Simp	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
1284202046Simp	dmat->bounce_zone = bz;
1285202046Simp
1286202046Simp	sysctl_ctx_init(&bz->sysctl_tree);
1287202046Simp	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
1288202046Simp	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
1289202046Simp	    CTLFLAG_RD, 0, "");
1290202046Simp	if (bz->sysctl_tree_top == NULL) {
1291202046Simp		sysctl_ctx_free(&bz->sysctl_tree);
1292202046Simp		return (0);	/* XXX error code? */
1293202046Simp	}
1294202046Simp
1295202046Simp	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1296202046Simp	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1297202046Simp	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
1298202046Simp	    "Total bounce pages");
1299202046Simp	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1300202046Simp	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1301202046Simp	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
1302202046Simp	    "Free bounce pages");
1303202046Simp	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1304202046Simp	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1305202046Simp	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
1306202046Simp	    "Reserved bounce pages");
1307202046Simp	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1308202046Simp	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1309202046Simp	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
1310202046Simp	    "Active bounce pages");
1311202046Simp	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1312202046Simp	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1313202046Simp	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
1314202046Simp	    "Total bounce requests");
1315202046Simp	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1316202046Simp	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1317202046Simp	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
1318202046Simp	    "Total bounce requests that were deferred");
1319202046Simp	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
1320202046Simp	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1321202046Simp	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
1322273377Shselasky	SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz),
1323202046Simp	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1324273377Shselasky	    "alignment", CTLFLAG_RD, &bz->alignment, "");
1325202046Simp
1326202046Simp	return (0);
1327202046Simp}
1328202046Simp
1329202046Simpstatic int
1330202046Simpalloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
1331202046Simp{
1332202046Simp	struct bounce_zone *bz;
1333202046Simp	int count;
1334202046Simp
1335202046Simp	bz = dmat->bounce_zone;
1336202046Simp	count = 0;
1337202046Simp	while (numpages > 0) {
1338202046Simp		struct bounce_page *bpage;
1339202046Simp
1340289701Sian		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_BUSDMA,
1341202046Simp						     M_NOWAIT | M_ZERO);
1342202046Simp
1343202046Simp		if (bpage == NULL)
1344202046Simp			break;
1345289701Sian		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_BOUNCE,
1346202046Simp							 M_NOWAIT, 0ul,
1347202046Simp							 bz->lowaddr,
1348202046Simp							 PAGE_SIZE,
1349202046Simp							 0);
1350202046Simp		if (bpage->vaddr == 0) {
1351202046Simp			free(bpage, M_BUSDMA);
1352202046Simp			break;
1353202046Simp		}
1354202046Simp		bpage->busaddr = pmap_kextract(bpage->vaddr);
1355202046Simp		bpage->vaddr_nocache =
1356212283Sjchandra		    (vm_offset_t)pmap_mapdev(bpage->busaddr, PAGE_SIZE);
1357202046Simp		mtx_lock(&bounce_lock);
1358202046Simp		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
1359202046Simp		total_bpages++;
1360202046Simp		bz->total_bpages++;
1361202046Simp		bz->free_bpages++;
1362202046Simp		mtx_unlock(&bounce_lock);
1363202046Simp		count++;
1364202046Simp		numpages--;
1365202046Simp	}
1366202046Simp	return (count);
1367202046Simp}
1368202046Simp
1369202046Simpstatic int
1370202046Simpreserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
1371202046Simp{
1372202046Simp	struct bounce_zone *bz;
1373202046Simp	int pages;
1374202046Simp
1375202046Simp	mtx_assert(&bounce_lock, MA_OWNED);
1376202046Simp	bz = dmat->bounce_zone;
1377202046Simp	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
1378202046Simp	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
1379202046Simp		return (map->pagesneeded - (map->pagesreserved + pages));
1380202046Simp	bz->free_bpages -= pages;
1381202046Simp	bz->reserved_bpages += pages;
1382202046Simp	map->pagesreserved += pages;
1383202046Simp	pages = map->pagesneeded - map->pagesreserved;
1384202046Simp
1385202046Simp	return (pages);
1386202046Simp}
1387202046Simp
1388202046Simpstatic bus_addr_t
1389202046Simpadd_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
1390246713Skib		bus_addr_t addr, bus_size_t size)
1391202046Simp{
1392202046Simp	struct bounce_zone *bz;
1393202046Simp	struct bounce_page *bpage;
1394202046Simp
1395202046Simp	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
1396202046Simp	KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));
1397202046Simp
1398202046Simp	bz = dmat->bounce_zone;
1399202046Simp	if (map->pagesneeded == 0)
1400202046Simp		panic("add_bounce_page: map doesn't need any pages");
1401202046Simp	map->pagesneeded--;
1402202046Simp
1403202046Simp	if (map->pagesreserved == 0)
1404202046Simp		panic("add_bounce_page: map doesn't need any pages");
1405202046Simp	map->pagesreserved--;
1406202046Simp
1407202046Simp	mtx_lock(&bounce_lock);
1408202046Simp	bpage = STAILQ_FIRST(&bz->bounce_page_list);
1409202046Simp	if (bpage == NULL)
1410202046Simp		panic("add_bounce_page: free page list is empty");
1411202046Simp
1412202046Simp	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
1413202046Simp	bz->reserved_bpages--;
1414202046Simp	bz->active_bpages++;
1415202046Simp	mtx_unlock(&bounce_lock);
1416202046Simp
1417202046Simp	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
1418202046Simp		/* Page offset needs to be preserved. */
1419282120Shselasky		bpage->vaddr |= addr & PAGE_MASK;
1420282120Shselasky		bpage->busaddr |= addr & PAGE_MASK;
1421202046Simp	}
1422202046Simp	bpage->datavaddr = vaddr;
1423246713Skib	bpage->dataaddr = addr;
1424202046Simp	bpage->datacount = size;
1425202046Simp	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
1426202046Simp	return (bpage->busaddr);
1427202046Simp}
1428202046Simp
1429202046Simpstatic void
1430202046Simpfree_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
1431202046Simp{
1432202046Simp	struct bus_dmamap *map;
1433202046Simp	struct bounce_zone *bz;
1434202046Simp
1435202046Simp	bz = dmat->bounce_zone;
1436202046Simp	bpage->datavaddr = 0;
1437202046Simp	bpage->datacount = 0;
1438202046Simp	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
1439202046Simp		/*
1440202046Simp		 * Reset the bounce page to start at offset 0.  Other uses
1441202046Simp		 * of this bounce page may need to store a full page of
1442202046Simp		 * data and/or assume it starts on a page boundary.
1443202046Simp		 */
1444202046Simp		bpage->vaddr &= ~PAGE_MASK;
1445202046Simp		bpage->busaddr &= ~PAGE_MASK;
1446202046Simp	}
1447202046Simp
1448202046Simp	mtx_lock(&bounce_lock);
1449202046Simp	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
1450202046Simp	bz->free_bpages++;
1451202046Simp	bz->active_bpages--;
1452202046Simp	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
1453202046Simp		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
1454202046Simp			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
1455202046Simp			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
1456202046Simp					   map, links);
1457202046Simp			busdma_swi_pending = 1;
1458202046Simp			bz->total_deferred++;
1459202046Simp			swi_sched(vm_ih, 0);
1460202046Simp		}
1461202046Simp	}
1462202046Simp	mtx_unlock(&bounce_lock);
1463202046Simp}
1464202046Simp
1465202046Simpvoid
1466202046Simpbusdma_swi(void)
1467202046Simp{
1468202046Simp	bus_dma_tag_t dmat;
1469202046Simp	struct bus_dmamap *map;
1470202046Simp
1471202046Simp	mtx_lock(&bounce_lock);
1472202046Simp	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
1473202046Simp		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
1474202046Simp		mtx_unlock(&bounce_lock);
1475202046Simp		dmat = map->dmat;
1476202046Simp		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
1477246713Skib		bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
1478246713Skib		    map->callback_arg, BUS_DMA_WAITOK);
1479202046Simp		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
1480202046Simp		mtx_lock(&bounce_lock);
1481202046Simp	}
1482202046Simp	mtx_unlock(&bounce_lock);
1483202046Simp}
1484