/*-
 * Copyright (c) 2006 Oleksandr Tymoshenko
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *  From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/mips/mips/busdma_machdep.c 289716 2015-10-21 19:24:20Z ian $");

/*
 * MIPS bus dma support routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/busdma_bufalloc.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cache.h>
#include <machine/cpufunc.h>
#include <machine/cpuinfo.h>
#include <machine/md_var.h>

#define MAX_BPAGES 64
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t		parent;
	bus_size_t		alignment;
	bus_addr_t		boundary;
	bus_addr_t		lowaddr;
	bus_addr_t		highaddr;
	bus_dma_filter_t	*filter;
	void			*filterarg;
	bus_size_t		maxsize;
	u_int			nsegments;
	bus_size_t		maxsegsz;
	int			flags;
	int			ref_count;
	int			map_count;
	bus_dma_lock_t		*lockfunc;
	void			*lockfuncarg;
	bus_dma_segment_t	*segments;
	struct bounce_zone *bounce_zone;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	vm_offset_t	vaddr_nocache;	/* kva of bounce buffer uncached */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_addr_t	dataaddr;	/* client physical address */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

struct sync_list {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	bus_size_t	datacount;	/* client data count */
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		map_count;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
	   "Total bounce pages");

#define DMAMAP_UNCACHEABLE	0x08
#define DMAMAP_CACHE_ALIGNED	0x10

struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	struct memdesc	mem;
	int		flags;
	void		*origbuffer;
	void		*allocbuffer;
	TAILQ_ENTRY(bus_dmamap)	freelist;
	STAILQ_ENTRY(bus_dmamap) links;
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	int		sync_count;
	struct sync_list *slist;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				  vm_offset_t vaddr, bus_addr_t addr,
				  bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);

/* Default tag, as most drivers provide no parent tag. */
bus_dma_tag_t mips_root_dma_tag;

static uma_zone_t dmamap_zone;	/* Cache of struct bus_dmamap items */

static busdma_bufalloc_t coherent_allocator;	/* Cache of coherent buffers */
static busdma_bufalloc_t standard_allocator;	/* Cache of standard buffers */

MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");
MALLOC_DEFINE(M_BOUNCE, "bounce", "busdma bounce pages");

/*
 * This is the ctor function passed to uma_zcreate() for the pool of dma maps.
 * It'll need platform-specific changes if this code is copied.
 */
static int
dmamap_ctor(void *mem, int size, void *arg, int flags)
{
	bus_dmamap_t map;
	bus_dma_tag_t dmat;

	map = (bus_dmamap_t)mem;
	dmat = (bus_dma_tag_t)arg;

	dmat->map_count++;

	map->dmat = dmat;
	map->flags = 0;
	map->slist = NULL;
	map->allocbuffer = NULL;
	map->sync_count = 0;
	STAILQ_INIT(&map->bpages);

	return (0);
}

/*
 * This is the dtor function passed to uma_zcreate() for the pool of dma maps.
 * It may need platform-specific changes if this code is copied.
 */
static void
dmamap_dtor(void *mem, int size, void *arg)
{
	bus_dmamap_t map;

	map = (bus_dmamap_t)mem;

	map->dmat->map_count--;
}

static void
busdma_init(void *dummy)
{

	/* Create a cache of maps for bus_dmamap_create(). */
	dmamap_zone = uma_zcreate("dma maps", sizeof(struct bus_dmamap),
	    dmamap_ctor, dmamap_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);

	/* Create a cache of buffers in standard (cacheable) memory. */
	standard_allocator = busdma_bufalloc_create("buffer",
	    mips_pdcache_linesize,	/* minimum_alignment */
	    NULL,			/* uma_alloc func */
	    NULL,			/* uma_free func */
	    0);				/* uma_zcreate_flags */

	/*
	 * Create a cache of buffers in uncacheable memory, to implement the
	 * BUS_DMA_COHERENT flag.
	 */
	coherent_allocator = busdma_bufalloc_create("coherent",
	    mips_pdcache_linesize,	/* minimum_alignment */
	    busdma_bufalloc_alloc_uncacheable,
	    busdma_bufalloc_free_uncacheable,
	    0);				/* uma_zcreate_flags */
}
SYSINIT(busdma, SI_SUB_KMEM, SI_ORDER_FOURTH, busdma_init, NULL);

/*
 * Return true if a match is made.
 *
 * To find a match, walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag, call the filter callback
 * to check for a match; if there is no filter callback, assume a match.
 */
static int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		 || ((paddr & (dmat->alignment - 1)) != 0))
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

/*
 * Check to see if the specified page is in an allowed DMA range.
 */

static __inline int
_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
{
	int i;
	for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
		if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1])
		    || (lowaddr < phys_avail[i] &&
		    highaddr > phys_avail[i]))
			return (1);
	}
	return (0);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
	panic("driver error: busdma dflt_lock called");
#else
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}

static __inline bus_dmamap_t
_busdma_alloc_dmamap(bus_dma_tag_t dmat)
{
	struct sync_list *slist;
	bus_dmamap_t map;

	slist = malloc(sizeof(*slist) * dmat->nsegments, M_BUSDMA, M_NOWAIT);
	if (slist == NULL)
		return (NULL);
	map = uma_zalloc_arg(dmamap_zone, dmat, M_NOWAIT);
	if (map != NULL)
		map->slist = slist;
	else
		free(slist, M_BUSDMA);
	return (map);
}

static __inline void
_busdma_free_dmamap(bus_dmamap_t map)
{

	free(map->slist, M_BUSDMA);
	uma_zfree(dmamap_zone, map);
}

/*
 * Allocate a device specific dma_tag.
 */
#define SEG_NB 1024

int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_addr_t boundary, bus_addr_t lowaddr,
    bus_addr_t highaddr, bus_dma_filter_t *filter,
    void *filterarg, bus_size_t maxsize, int nsegments,
    bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;
	/* Return a NULL tag on failure */
	*dmat = NULL;
	if (!parent)
		parent = mips_root_dma_tag;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_BUSDMA, M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	if (cpuinfo.cache_coherent_dma)
		newtag->flags |= BUS_DMA_COHERENT;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	newtag->segments = NULL;

	/*
	 * Take into account any restrictions imposed by our parent tag
	 */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary =
			    MIN(parent->boundary, newtag->boundary);
		if ((newtag->filter != NULL) ||
		    ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
			newtag->flags |= BUS_DMA_COULD_BOUNCE;
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}
	if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr)
	 || newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_BUSDMA);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	} else
		newtag->bounce_zone = NULL;
	if (error != 0)
		free(newtag, M_BUSDMA);
	else
		*dmat = newtag;
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);

	return (error);
}
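
/*
 * Example (editorial addition, not part of the original source): a sketch of
 * how a typical driver might call bus_dma_tag_create().  The softc layout,
 * device_t member, and size limits below are hypothetical.
 */
#if 0
static int
example_dma_tag_setup(struct example_softc *sc)
{

	/*
	 * 4-byte alignment, no boundary restriction, 32-bit addressable
	 * memory, up to 64KB per transfer in at most 16 segments, with the
	 * driver's own mutex used for deferred-load callbacks via
	 * busdma_lock_mutex.
	 */
	return (bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),		/* parent */
	    4, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    65536,				/* maxsize */
	    16,					/* nsegments */
	    65536,				/* maxsegsz */
	    0,					/* flags */
	    busdma_lock_mutex, &sc->mtx,	/* lockfunc, lockfuncarg */
	    &sc->dmat));
}
#endif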

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
#ifdef KTR
	bus_dma_tag_t dmat_copy = dmat;
#endif

	if (dmat != NULL) {
		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL)
					free(dmat->segments, M_BUSDMA);
				free(dmat, M_BUSDMA);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy);

	return (0);
}

#include <sys/kdb.h>
/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap;
	int error = 0;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_BUSDMA,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}
	}

	newmap = _busdma_alloc_dmamap(dmat);
	if (newmap == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
		return (ENOMEM);
	}
	*mapp = newmap;

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0) {
				_busdma_free_dmamap(newmap);
				*mapp = NULL;
				return (error);
			}
		}
		bz = dmat->bounce_zone;

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MAX_BPAGES;
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
		bz->map_count++;
	}

	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);

	return (0);
}
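
/*
 * Example (editorial addition): the usual life cycle of a map obtained from
 * bus_dmamap_create(), as seen from a hypothetical driver write path.  The
 * softc fields and the example_load_cb callback are illustrative only.
 */
#if 0
static void
example_dma_write(struct example_softc *sc, void *buf, bus_size_t len)
{

	bus_dmamap_create(sc->dmat, 0, &sc->map);
	/*
	 * The callback receives the bus_dma_segment_t array to program into
	 * the device; with BUS_DMA_NOWAIT the load completes or fails
	 * immediately instead of being deferred to busdma_swi().
	 */
	bus_dmamap_load(sc->dmat, sc->map, buf, len,
	    example_load_cb, sc, BUS_DMA_NOWAIT);
	bus_dmamap_sync(sc->dmat, sc->map, BUS_DMASYNC_PREWRITE);
	/* ... start the transfer; when the device signals completion: */
	bus_dmamap_sync(sc->dmat, sc->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->dmat, sc->map);
	bus_dmamap_destroy(sc->dmat, sc->map);
}
#endif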

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, EBUSY);
		return (EBUSY);
	}
	if (dmat->bounce_zone)
		dmat->bounce_zone->map_count--;
	_busdma_free_dmamap(map);
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddrp, int flags,
    bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap = NULL;
	busdma_bufalloc_t ba;
	struct busdma_bufzone *bufzone;
	vm_memattr_t memattr;
	void *vaddr;

	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_BUSDMA,
		    mflags);
		if (dmat->segments == NULL) {
			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
			    __func__, dmat, dmat->flags, ENOMEM);
			return (ENOMEM);
		}
	}

	newmap = _busdma_alloc_dmamap(dmat);
	if (newmap == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	}

	/*
	 * If all the memory is coherent with DMA then we don't need to
	 * do anything special for a coherent mapping request.
	 */
	if (dmat->flags & BUS_DMA_COHERENT)
	    flags &= ~BUS_DMA_COHERENT;

	if (flags & BUS_DMA_COHERENT) {
		memattr = VM_MEMATTR_UNCACHEABLE;
		ba = coherent_allocator;
		newmap->flags |= DMAMAP_UNCACHEABLE;
	} else {
		memattr = VM_MEMATTR_DEFAULT;
		ba = standard_allocator;
	}
	/* All buffers we allocate are cache-aligned. */
	newmap->flags |= DMAMAP_CACHE_ALIGNED;

	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	/*
	 * Try to find a bufzone in the allocator that holds a cache of buffers
	 * of the right size for this request.  If the buffer is too big to be
	 * held in the allocator cache, this returns NULL.
	 */
	bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);

	/*
	 * Allocate the buffer from the uma(9) allocator if...
	 *  - It's small enough to be in the allocator (bufzone not NULL).
	 *  - The alignment constraint isn't larger than the allocation size
	 *    (the allocator aligns buffers to their size boundaries).
	 *  - There's no need to handle lowaddr/highaddr exclusion zones.
	 * else allocate non-contiguous pages if...
	 *  - The page count that could get allocated doesn't exceed nsegments.
	 *  - The alignment constraint isn't larger than a page boundary.
	 *  - There are no boundary-crossing constraints.
	 * else allocate a block of contiguous pages because one or more of the
	 * constraints is something that only the contig allocator can fulfill.
	 */
	if (bufzone != NULL && dmat->alignment <= bufzone->size &&
	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) {
		vaddr = uma_zalloc(bufzone->umazone, mflags);
	} else if (dmat->nsegments >= btoc(dmat->maxsize) &&
	    dmat->alignment <= PAGE_SIZE && dmat->boundary == 0) {
		vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
		    mflags, 0, dmat->lowaddr, memattr);
	} else {
		vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize,
		    mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,
		    memattr);
	}
	if (vaddr == NULL) {
		_busdma_free_dmamap(newmap);
		newmap = NULL;
	} else {
		newmap->sync_count = 0;
	}
	*vaddrp = vaddr;
	*mapp = newmap;

	return (vaddr == NULL ? ENOMEM : 0);
}
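
/*
 * Example (editorial addition): allocating a small descriptor ring with
 * bus_dmamem_alloc().  BUS_DMA_COHERENT requests an uncacheable mapping
 * unless the platform is already DMA-coherent; the names are hypothetical.
 */
#if 0
static int
example_alloc_ring(struct example_softc *sc)
{
	int error;

	error = bus_dmamem_alloc(sc->ring_dmat, (void **)&sc->ring,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->ring_map);
	if (error != 0)
		return (error);
	/* Freed with bus_dmamem_free(sc->ring_dmat, sc->ring, sc->ring_map). */
	return (0);
}
#endif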

/*
 * Free a piece of memory and its allocated dmamap, which were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	struct busdma_bufzone *bufzone;
	busdma_bufalloc_t ba;

	if (map->flags & DMAMAP_UNCACHEABLE)
		ba = coherent_allocator;
	else
		ba = standard_allocator;

	free(map->slist, M_BUSDMA);
	uma_zfree(dmamap_zone, map);

	bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);

	if (bufzone != NULL && dmat->alignment <= bufzone->size &&
	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
		uma_zfree(bufzone->umazone, vaddr);
	else
		kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;

	if ((map->pagesneeded == 0)) {
		CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
		    dmat->lowaddr, dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		curaddr = buf;
		while (buflen != 0) {
			sgsize = MIN(buflen, dmat->maxsegsz);
			if (run_filter(dmat, curaddr) != 0) {
				sgsize = MIN(sgsize, PAGE_SIZE);
				map->pagesneeded++;
			}
			curaddr += sgsize;
			buflen -= sgsize;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if ((map->pagesneeded == 0)) {
		CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
		    dmat->lowaddr, dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			bus_size_t sg_len;

			KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
			paddr = pmap_kextract(vaddr);
			if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
			    run_filter(dmat, paddr) != 0) {
				sg_len = roundup2(sg_len, dmat->alignment);
				map->pagesneeded++;
			}
			vaddr += sg_len;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{

	/* Reserve Necessary Bounce Pages */
	mtx_lock(&bounce_lock);
	if (flags & BUS_DMA_NOWAIT) {
		if (reserve_bounce_pages(dmat, map, 0) != 0) {
			mtx_unlock(&bounce_lock);
			return (ENOMEM);
		}
	} else {
		if (reserve_bounce_pages(dmat, map, 1) != 0) {
			/* Queue us for resources */
			STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
			    map, links);
			mtx_unlock(&bounce_lock);
			return (EINPROGRESS);
		}
	}
	mtx_unlock(&bounce_lock);

	return (0);
}

/*
 * Add a single contiguous physical range to the segment list.
 */
static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
	bus_addr_t baddr, bmask;
	int seg;

	/*
	 * Make sure we don't cross any boundaries.
	 */
	bmask = ~(dmat->boundary - 1);
	if (dmat->boundary > 0) {
		baddr = (curaddr + dmat->boundary) & bmask;
		if (sgsize > (baddr - curaddr))
			sgsize = (baddr - curaddr);
	}
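	/*
	 * Worked example (editorial addition): with a 64KB boundary
	 * (bmask == ~0xffff), a 0x3000-byte chunk at curaddr 0x1ffff800
	 * would cross 0x20000000, so sgsize is clamped to
	 * 0x20000000 - 0x1ffff800 = 0x800; the remainder is emitted as a
	 * separate segment on a later call.
	 */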
	/*
	 * Insert chunk into a segment, coalescing with
	 * the previous segment if possible.
	 */
	seg = *segp;
	if (seg >= 0 &&
	    curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
	    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
	    (dmat->boundary == 0 ||
	     (segs[seg].ds_addr & bmask) == (curaddr & bmask))) {
		segs[seg].ds_len += sgsize;
	} else {
		if (++seg >= dmat->nsegments)
			return (0);
		segs[seg].ds_addr = curaddr;
		segs[seg].ds_len = sgsize;
	}
	*segp = seg;
	return (sgsize);
}

/*
 * Utility function to load a physical buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
    int *segp)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;
	int error;

	if (segs == NULL)
		segs = dmat->segments;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	while (buflen > 0) {
		curaddr = buf;
		sgsize = MIN(buflen, dmat->maxsegsz);
		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			sgsize = MIN(sgsize, PAGE_SIZE);
			curaddr = add_bounce_page(dmat, map, 0, curaddr,
			    sgsize);
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		buf += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		_bus_dmamap_unload(dmat, map);
		return (EFBIG); /* XXX better return value here? */
	}
	return (0);
}

int
_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{

	return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
	    segs, segp));
}

/*
 * Utility function to load a linear buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct pmap *pmap, int flags, bus_dma_segment_t *segs,
    int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	struct sync_list *sl;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int error = 0;


	if (segs == NULL)
		segs = dmat->segments;
	if ((flags & BUS_DMA_LOAD_MBUF) != 0)
		map->flags |= DMAMAP_CACHE_ALIGNED;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}
	CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
	    "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);

	while (buflen > 0) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Don't support checking for coherent mappings
		 * XXX in user address space.
		 */
		KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
		curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (sgsize > dmat->maxsegsz)
			sgsize = dmat->maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;

		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
			    sgsize);
		} else {
			sl = &map->slist[map->sync_count - 1];
			if (map->sync_count == 0 ||
			    vaddr != sl->vaddr + sl->datacount) {
				if (++map->sync_count > dmat->nsegments)
					goto cleanup;
				sl++;
				sl->vaddr = vaddr;
				sl->datacount = sgsize;
				sl->busaddr = curaddr;
			} else
				sl->datacount += sgsize;
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		vaddr += sgsize;
		buflen -= sgsize;
	}

cleanup:
	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		_bus_dmamap_unload(dmat, map);
		error = EFBIG; /* XXX better return value here? */
	}
	return (error);
}

void
__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
{

	KASSERT(dmat != NULL, ("dmatag is NULL"));
	KASSERT(map != NULL, ("dmamap is NULL"));
	map->mem = *mem;
	map->callback = callback;
	map->callback_arg = callback_arg;
}

bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, int error)
{

	if (segs == NULL)
		segs = dmat->segments;
	return (segs);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
	map->sync_count = 0;
	return;
}

static void
bus_dmamap_sync_buf(vm_offset_t buf, int len, bus_dmasync_op_t op, int aligned)
{
	char tmp_cl[mips_pdcache_linesize], tmp_clend[mips_pdcache_linesize];
	vm_offset_t buf_cl, buf_clend;
	vm_size_t size_cl, size_clend;
	int cache_linesize_mask = mips_pdcache_linesize - 1;

	/*
	 * dcache invalidation operates on cache line aligned addresses
	 * and could modify areas of memory that share the same cache line
	 * at the beginning and the end of the buffer.  In order to prevent
	 * data loss we save these chunks in a temporary buffer before
	 * invalidation and restore them after it.
	 *
	 * If the aligned flag is set the buffer is either an mbuf or came from
	 * our allocator caches.  In both cases they are always sized and
	 * aligned to cacheline boundaries, so we can skip preserving nearby
	 * data if a transfer appears to overlap cachelines.  An mbuf in
	 * particular will usually appear to be overlapped because of offsetting
	 * within the buffer to align the L3 headers, but we know that the bytes
	 * preceding that offset are part of the same mbuf memory and are not
	 * unrelated adjacent data (and a rule of mbuf handling is that the cpu
	 * is not allowed to touch the mbuf while dma is in progress, including
	 * header fields).
	 */
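	/*
	 * Worked example (editorial addition): with 32-byte cache lines, an
	 * unaligned buffer at 0x80000104 of length 0x30 gives
	 * buf_cl = 0x80000100 and size_cl = 4 (bytes saved ahead of the
	 * buffer), buf_clend = 0x80000134 and size_clend = 12 (bytes saved
	 * after it), so unrelated data sharing the first and last cache
	 * lines survives the invalidation below.
	 */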
	if (aligned) {
		size_cl = 0;
		size_clend = 0;
	} else {
		buf_cl = buf & ~cache_linesize_mask;
		size_cl = buf & cache_linesize_mask;
		buf_clend = buf + len;
		size_clend = (mips_pdcache_linesize -
		    (buf_clend & cache_linesize_mask)) & cache_linesize_mask;
	}

	switch (op) {
	case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
	case BUS_DMASYNC_POSTREAD:

		/*
		 * Save buffers that might be modified by invalidation
		 */
		if (size_cl)
			memcpy (tmp_cl, (void*)buf_cl, size_cl);
		if (size_clend)
			memcpy (tmp_clend, (void*)buf_clend, size_clend);
		mips_dcache_inv_range(buf, len);
		/*
		 * Restore them
		 */
		if (size_cl)
			memcpy ((void*)buf_cl, tmp_cl, size_cl);
		if (size_clend)
			memcpy ((void*)buf_clend, tmp_clend, size_clend);
		/*
		 * Copies above have brought corresponding memory
		 * cache lines back into dirty state. Write them back
		 * out and invalidate affected cache lines again if
		 * necessary.
		 */
		if (size_cl)
			mips_dcache_wbinv_range(buf_cl, size_cl);
		if (size_clend && (size_cl == 0 ||
                    buf_clend - buf_cl > mips_pdcache_linesize))
			mips_dcache_wbinv_range(buf_clend, size_clend);
		break;

	case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
		mips_dcache_wbinv_range(buf, len);
		break;

	case BUS_DMASYNC_PREREAD:
		/*
		 * Save buffers that might be modified by invalidation
		 */
		if (size_cl)
			memcpy (tmp_cl, (void *)buf_cl, size_cl);
		if (size_clend)
			memcpy (tmp_clend, (void *)buf_clend, size_clend);
		mips_dcache_inv_range(buf, len);
		/*
		 * Restore them
		 */
		if (size_cl)
			memcpy ((void *)buf_cl, tmp_cl, size_cl);
		if (size_clend)
			memcpy ((void *)buf_clend, tmp_clend, size_clend);
		/*
		 * Copies above have brought corresponding memory
		 * cache lines back into dirty state. Write them back
		 * out and invalidate affected cache lines again if
		 * necessary.
		 */
		if (size_cl)
			mips_dcache_wbinv_range(buf_cl, size_cl);
		if (size_clend && (size_cl == 0 ||
                    buf_clend - buf_cl > mips_pdcache_linesize))
			mips_dcache_wbinv_range(buf_clend, size_clend);
		break;

	case BUS_DMASYNC_PREWRITE:
		mips_dcache_wb_range(buf, len);
		break;
	}
}

static void
_bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	STAILQ_FOREACH(bpage, &map->bpages, links) {
		if (op & BUS_DMASYNC_PREWRITE) {
			if (bpage->datavaddr != 0)
				bcopy((void *)bpage->datavaddr,
				    (void *)(bpage->vaddr_nocache != 0 ?
					     bpage->vaddr_nocache :
					     bpage->vaddr),
				    bpage->datacount);
			else
				physcopyout(bpage->dataaddr,
				    (void *)(bpage->vaddr_nocache != 0 ?
					     bpage->vaddr_nocache :
					     bpage->vaddr),
				    bpage->datacount);
			if (bpage->vaddr_nocache == 0) {
				mips_dcache_wb_range(bpage->vaddr,
				    bpage->datacount);
			}
			dmat->bounce_zone->total_bounced++;
		}
		if (op & BUS_DMASYNC_POSTREAD) {
			if (bpage->vaddr_nocache == 0) {
				mips_dcache_inv_range(bpage->vaddr,
				    bpage->datacount);
			}
			if (bpage->datavaddr != 0)
				bcopy((void *)(bpage->vaddr_nocache != 0 ?
				    bpage->vaddr_nocache : bpage->vaddr),
				    (void *)bpage->datavaddr, bpage->datacount);
			else
				physcopyin((void *)(bpage->vaddr_nocache != 0 ?
				    bpage->vaddr_nocache : bpage->vaddr),
				    bpage->dataaddr, bpage->datacount);
			dmat->bounce_zone->total_bounced++;
		}
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct sync_list *sl, *end;
	int aligned;

	if (op == BUS_DMASYNC_POSTWRITE)
		return;
	if (STAILQ_FIRST(&map->bpages))
		_bus_dmamap_sync_bp(dmat, map, op);

	if (dmat->flags & BUS_DMA_COHERENT)
		return;

	if (map->flags & DMAMAP_UNCACHEABLE)
		return;

	aligned = (map->flags & DMAMAP_CACHE_ALIGNED) ? 1 : 0;

	CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
	if (map->sync_count) {
		end = &map->slist[map->sync_count];
		for (sl = &map->slist[0]; sl != end; sl++)
			bus_dmamap_sync_buf(sl->vaddr, sl->datacount, op,
			    aligned);
	}
}

static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		 && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_BUSDMA,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
	bz->map_count = 0;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, "");

	return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_BUSDMA,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_BOUNCE,
							 M_NOWAIT, 0ul,
							 bz->lowaddr,
							 PAGE_SIZE,
							 0);
		if (bpage->vaddr == 0) {
			free(bpage, M_BUSDMA);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		bpage->vaddr_nocache =
		    (vm_offset_t)pmap_mapdev(bpage->busaddr, PAGE_SIZE);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_addr_t addr, bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
		bpage->vaddr |= addr & PAGE_MASK;
		bpage->busaddr |= addr & PAGE_MASK;
	}
	bpage->datavaddr = vaddr;
	bpage->dataaddr = addr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;
	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * Reset the bounce page to start at offset 0.  Other uses
		 * of this bounce page may need to store a full page of
		 * data and/or assume it starts on a page boundary.
		 */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
	}

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
		    map->callback_arg, BUS_DMA_WAITOK);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}
1493