/*-
 * Copyright (c) 2006 Oleksandr Tymoshenko
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *  From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/mips/mips/busdma_machdep.c 318976 2017-05-27 07:47:52Z hselasky $");

/*
 * MIPS bus dma support routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/busdma_bufalloc.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cache.h>
#include <machine/cpufunc.h>
#include <machine/cpuinfo.h>
#include <machine/md_var.h>

#define MAX_BPAGES 64
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t		parent;
	bus_size_t		alignment;
	bus_addr_t		boundary;
	bus_addr_t		lowaddr;
	bus_addr_t		highaddr;
	bus_dma_filter_t	*filter;
	void			*filterarg;
	bus_size_t		maxsize;
	u_int			nsegments;
	bus_size_t		maxsegsz;
	int			flags;
	int			ref_count;
	int			map_count;
	bus_dma_lock_t		*lockfunc;
	void			*lockfuncarg;
	bus_dma_segment_t	*segments;
	struct bounce_zone *bounce_zone;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	vm_offset_t	vaddr_nocache;	/* kva of bounce buffer uncached */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_addr_t	dataaddr;	/* client physical address */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

struct sync_list {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	bus_size_t	datacount;	/* client data count */
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		map_count;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
	   "Total bounce pages");

#define DMAMAP_UNCACHEABLE	0x08
#define DMAMAP_CACHE_ALIGNED	0x10

struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	struct memdesc	mem;
	int		flags;
	void		*origbuffer;
	void		*allocbuffer;
	TAILQ_ENTRY(bus_dmamap)	freelist;
	STAILQ_ENTRY(bus_dmamap) links;
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	int		sync_count;
	struct sync_list *slist;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				  vm_offset_t vaddr, bus_addr_t addr,
				  bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);

/* Default tag, as most drivers provide no parent tag. */
bus_dma_tag_t mips_root_dma_tag;

static uma_zone_t dmamap_zone;	/* Cache of struct bus_dmamap items */

static busdma_bufalloc_t coherent_allocator;	/* Cache of coherent buffers */
static busdma_bufalloc_t standard_allocator;	/* Cache of standard buffers */

MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");
MALLOC_DEFINE(M_BOUNCE, "bounce", "busdma bounce pages");

/*
 * This is the ctor function passed to uma_zcreate() for the pool of dma maps.
 * It'll need platform-specific changes if this code is copied.
 */
static int
dmamap_ctor(void *mem, int size, void *arg, int flags)
{
	bus_dmamap_t map;
	bus_dma_tag_t dmat;

	map = (bus_dmamap_t)mem;
	dmat = (bus_dma_tag_t)arg;

	dmat->map_count++;

	map->dmat = dmat;
	map->flags = 0;
	map->slist = NULL;
	map->allocbuffer = NULL;
	map->sync_count = 0;
	STAILQ_INIT(&map->bpages);

	return (0);
}

/*
 * This is the dtor function passed to uma_zcreate() for the pool of dma maps.
 * It may need platform-specific changes if this code is copied.
 */
static void
dmamap_dtor(void *mem, int size, void *arg)
{
	bus_dmamap_t map;

	map = (bus_dmamap_t)mem;

	map->dmat->map_count--;
}

static void
busdma_init(void *dummy)
{

	/* Create a cache of maps for bus_dmamap_create(). */
	dmamap_zone = uma_zcreate("dma maps", sizeof(struct bus_dmamap),
	    dmamap_ctor, dmamap_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);

	/* Create a cache of buffers in standard (cacheable) memory. */
	standard_allocator = busdma_bufalloc_create("buffer",
	    mips_pdcache_linesize,	/* minimum_alignment */
	    NULL,			/* uma_alloc func */
	    NULL,			/* uma_free func */
	    0);				/* uma_zcreate_flags */

	/*
	 * Create a cache of buffers in uncacheable memory, to implement the
	 * BUS_DMA_COHERENT flag.
	 */
	coherent_allocator = busdma_bufalloc_create("coherent",
	    mips_pdcache_linesize,	/* minimum_alignment */
	    busdma_bufalloc_alloc_uncacheable,
	    busdma_bufalloc_free_uncacheable,
	    0);				/* uma_zcreate_flags */
}
SYSINIT(busdma, SI_SUB_KMEM, SI_ORDER_FOURTH, busdma_init, NULL);

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match, if there is no filter callback then assume a match.
 */
static int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		 || ((paddr & (dmat->alignment - 1)) != 0))
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}
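
/*
 * Worked example (made-up numbers, for illustration only): for a tag with
 * lowaddr 0x0fffffff, highaddr BUS_SPACE_MAXADDR and alignment 4, a segment
 * at physical address 0x10000000 falls inside the (lowaddr, highaddr]
 * exclusion window and is bounced, while a segment at 0x00ff0002 is below
 * lowaddr but violates the alignment mask (0x00ff0002 & 3 != 0), so it is
 * bounced as well.  A tag-supplied filter callback, when present, can veto
 * either case by returning 0.
 */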

/*
 * Check to see if the specified page is in an allowed DMA range.
 */

static __inline int
_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
{
	int i;
	for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
		if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1])
		    || (lowaddr < phys_avail[i] &&
		    highaddr > phys_avail[i]))
			return (1);
	}
	return (0);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
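
/*
 * For illustration (a sketch only; the driver names are hypothetical): a
 * driver passes this function and its own mutex (or &Giant) as the
 * lockfunc/lockfuncarg pair of bus_dma_tag_create(), and busdma then brackets
 * deferred callbacks with it, exactly as busdma_swi() does below:
 *
 *	(*dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
 *	... deferred load callback runs ...
 *	(*dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
 */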

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
	panic("driver error: busdma dflt_lock called");
#else
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}

static __inline bus_dmamap_t
_busdma_alloc_dmamap(bus_dma_tag_t dmat)
{
	struct sync_list *slist;
	bus_dmamap_t map;

	slist = malloc(sizeof(*slist) * dmat->nsegments, M_BUSDMA, M_NOWAIT);
	if (slist == NULL)
		return (NULL);
	map = uma_zalloc_arg(dmamap_zone, dmat, M_NOWAIT);
	if (map != NULL)
		map->slist = slist;
	else
		free(slist, M_BUSDMA);
	return (map);
}

static __inline void
_busdma_free_dmamap(bus_dmamap_t map)
{

	free(map->slist, M_BUSDMA);
	uma_zfree(dmamap_zone, map);
}

/*
 * Allocate a device specific dma_tag.
 */
#define SEG_NB 1024

int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_addr_t boundary, bus_addr_t lowaddr,
    bus_addr_t highaddr, bus_dma_filter_t *filter,
    void *filterarg, bus_size_t maxsize, int nsegments,
    bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;
	/* Return a NULL tag on failure */
	*dmat = NULL;
	if (!parent)
		parent = mips_root_dma_tag;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_BUSDMA, M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	if (cpuinfo.cache_coherent_dma)
		newtag->flags |= BUS_DMA_COHERENT;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	newtag->segments = NULL;

	/*
	 * Take into account any restrictions imposed by our parent tag
	 */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary =
			    MIN(parent->boundary, newtag->boundary);
		if ((newtag->filter != NULL) ||
		    ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
			newtag->flags |= BUS_DMA_COULD_BOUNCE;
		if (newtag->filter == NULL) {
			/*
			* Short circuit looking at our parent directly
			* since we have encapsulated all of its information
			*/
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}
	if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr)
	 || newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_BUSDMA);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	} else
		newtag->bounce_zone = NULL;
	if (error != 0)
		free(newtag, M_BUSDMA);
	else
		*dmat = newtag;
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);

	return (error);
}
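
/*
 * Usage sketch (illustrative only; "dev", "sc", "RING_SIZE" and the softc
 * members are hypothetical driver names, not part of this file): a typical
 * descriptor-ring tag for a device limited to 32-bit DMA addresses would be
 * created along these lines.
 *
 *	error = bus_dma_tag_create(
 *	    bus_get_dma_tag(dev),		// parent
 *	    PAGE_SIZE, 0,			// alignment, boundary
 *	    BUS_SPACE_MAXADDR_32BIT,		// lowaddr
 *	    BUS_SPACE_MAXADDR,			// highaddr
 *	    NULL, NULL,				// filter, filterarg
 *	    RING_SIZE, 1, RING_SIZE,		// maxsize, nsegments, maxsegsz
 *	    0,					// flags
 *	    busdma_lock_mutex, &sc->sc_mtx,	// lockfunc, lockfuncarg
 *	    &sc->ring_dmat);
 *
 * With alignment > 1, or a lowaddr that excludes part of physical memory,
 * the code above marks the tag BUS_DMA_COULD_BOUNCE, so loads through it may
 * consume bounce pages.
 */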

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
#ifdef KTR
	bus_dma_tag_t dmat_copy = dmat;
#endif

	if (dmat != NULL) {
		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL)
					free(dmat->segments, M_BUSDMA);
				free(dmat, M_BUSDMA);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy);

	return (0);
}

#include <sys/kdb.h>
/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap;
	int error = 0;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_BUSDMA,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}
	}

	newmap = _busdma_alloc_dmamap(dmat);
	if (newmap == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
		return (ENOMEM);
	}
	*mapp = newmap;

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0) {
				_busdma_free_dmamap(newmap);
				*mapp = NULL;
				return (error);
			}
		}
		bz = dmat->bounce_zone;

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MAX_BPAGES;
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
		bz->map_count++;
	}

	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);

	return (0);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, EBUSY);
		return (EBUSY);
	}
	if (dmat->bounce_zone)
		dmat->bounce_zone->map_count--;
	_busdma_free_dmamap(map);
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddrp, int flags,
    bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap = NULL;
	busdma_bufalloc_t ba;
	struct busdma_bufzone *bufzone;
	vm_memattr_t memattr;
	void *vaddr;

	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_BUSDMA,
		    mflags);
		if (dmat->segments == NULL) {
			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
			    __func__, dmat, dmat->flags, ENOMEM);
			return (ENOMEM);
		}
	}

	newmap = _busdma_alloc_dmamap(dmat);
	if (newmap == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	}

	/*
	 * If all the memory is coherent with DMA then we don't need to
	 * do anything special for a coherent mapping request.
	 */
	if (dmat->flags & BUS_DMA_COHERENT)
	    flags &= ~BUS_DMA_COHERENT;

	if (flags & BUS_DMA_COHERENT) {
		memattr = VM_MEMATTR_UNCACHEABLE;
		ba = coherent_allocator;
		newmap->flags |= DMAMAP_UNCACHEABLE;
	} else {
		memattr = VM_MEMATTR_DEFAULT;
		ba = standard_allocator;
	}
	/* All buffers we allocate are cache-aligned. */
	newmap->flags |= DMAMAP_CACHE_ALIGNED;

	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	/*
	 * Try to find a bufzone in the allocator that holds a cache of buffers
	 * of the right size for this request.  If the buffer is too big to be
	 * held in the allocator cache, this returns NULL.
	 */
	bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);

	/*
	 * Allocate the buffer from the uma(9) allocator if...
	 *  - It's small enough to be in the allocator (bufzone not NULL).
	 *  - The alignment constraint isn't larger than the allocation size
	 *    (the allocator aligns buffers to their size boundaries).
	 *  - There's no need to handle lowaddr/highaddr exclusion zones.
	 * else allocate non-contiguous pages if...
	 *  - The number of pages that would be allocated does not exceed
	 *    nsegments, even when the maximum segment size is smaller than
	 *    PAGE_SIZE.
	 *  - The alignment constraint isn't larger than a page boundary.
	 *  - There are no boundary-crossing constraints.
	 * else allocate a block of contiguous pages because one or more of the
	 * constraints is something that only the contig allocator can fulfill.
	 */
	if (bufzone != NULL && dmat->alignment <= bufzone->size &&
	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) {
		vaddr = uma_zalloc(bufzone->umazone, mflags);
	} else if (dmat->nsegments >=
	    howmany(dmat->maxsize, MIN(dmat->maxsegsz, PAGE_SIZE)) &&
	    dmat->alignment <= PAGE_SIZE &&
	    (dmat->boundary % PAGE_SIZE) == 0) {
		vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
		    mflags, 0, dmat->lowaddr, memattr);
	} else {
		vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize,
		    mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,
		    memattr);
	}
	if (vaddr == NULL) {
		_busdma_free_dmamap(newmap);
		newmap = NULL;
	} else {
		newmap->sync_count = 0;
	}
	*vaddrp = vaddr;
	*mapp = newmap;

	return (vaddr == NULL ? ENOMEM : 0);
}
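
/*
 * Usage sketch (illustrative; "sc", "ring_cb", "RING_SIZE" and the softc
 * members are hypothetical, not part of this file): allocating a coherent
 * descriptor ring and loading it through a previously created tag typically
 * looks like this.
 *
 *	error = bus_dmamem_alloc(sc->ring_dmat, &sc->ring_va,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->ring_map);
 *	if (error == 0)
 *		error = bus_dmamap_load(sc->ring_dmat, sc->ring_map,
 *		    sc->ring_va, RING_SIZE, ring_cb, sc, BUS_DMA_NOWAIT);
 *
 * ring_cb() receives the bus_dma_segment_t array and records the
 * device-visible address; bus_dmamap_unload() and bus_dmamem_free() undo the
 * two steps in reverse order.
 */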

/*
 * Free a piece of memory and its allocated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	struct busdma_bufzone *bufzone;
	busdma_bufalloc_t ba;

	if (map->flags & DMAMAP_UNCACHEABLE)
		ba = coherent_allocator;
	else
		ba = standard_allocator;

	free(map->slist, M_BUSDMA);
	uma_zfree(dmamap_zone, map);

	bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);

	if (bufzone != NULL && dmat->alignment <= bufzone->size &&
	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
		uma_zfree(bufzone->umazone, vaddr);
	else
		kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;

	if ((map->pagesneeded == 0)) {
		CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
		    dmat->lowaddr, dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		curaddr = buf;
		while (buflen != 0) {
			sgsize = MIN(buflen, dmat->maxsegsz);
			if (run_filter(dmat, curaddr) != 0) {
				sgsize = MIN(sgsize, PAGE_SIZE);
				map->pagesneeded++;
			}
			curaddr += sgsize;
			buflen -= sgsize;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if ((map->pagesneeded == 0)) {
		CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
		    dmat->lowaddr, dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			bus_size_t sg_len;

			KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
			paddr = pmap_kextract(vaddr);
			if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
			    run_filter(dmat, paddr) != 0) {
				sg_len = roundup2(sg_len, dmat->alignment);
				map->pagesneeded++;
			}
			vaddr += sg_len;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{

	/* Reserve Necessary Bounce Pages */
	mtx_lock(&bounce_lock);
	if (flags & BUS_DMA_NOWAIT) {
		if (reserve_bounce_pages(dmat, map, 0) != 0) {
			mtx_unlock(&bounce_lock);
			return (ENOMEM);
		}
	} else {
		if (reserve_bounce_pages(dmat, map, 1) != 0) {
			/* Queue us for resources */
			STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
			    map, links);
			mtx_unlock(&bounce_lock);
			return (EINPROGRESS);
		}
	}
	mtx_unlock(&bounce_lock);

	return (0);
}

/*
 * Add a single contiguous physical range to the segment list.
 */
static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
	bus_addr_t baddr, bmask;
	int seg;

	/*
	 * Make sure we don't cross any boundaries.
	 */
	bmask = ~(dmat->boundary - 1);
	if (dmat->boundary > 0) {
		baddr = (curaddr + dmat->boundary) & bmask;
		if (sgsize > (baddr - curaddr))
			sgsize = (baddr - curaddr);
	}
	/*
	 * Insert chunk into a segment, coalescing with
	 * the previous segment if possible.
	 */
	seg = *segp;
	if (seg >= 0 &&
	    curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
	    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
	    (dmat->boundary == 0 ||
	     (segs[seg].ds_addr & bmask) == (curaddr & bmask))) {
		segs[seg].ds_len += sgsize;
	} else {
		if (++seg >= dmat->nsegments)
			return (0);
		segs[seg].ds_addr = curaddr;
		segs[seg].ds_len = sgsize;
	}
	*segp = seg;
	return (sgsize);
}
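
/*
 * Worked example (numbers are illustrative only): with dmat->boundary set to
 * 0x10000 (64K), a chunk starting at curaddr 0x0001f000 with sgsize 0x2000
 * would cross the 0x00020000 line, so the code above clips sgsize to
 * 0x00020000 - 0x0001f000 = 0x1000; the remaining 0x1000 bytes start a new
 * segment on the next pass of the caller's loop.
 */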

/*
 * Utility function to load a physical buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
    int *segp)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;
	int error;

	if (segs == NULL)
		segs = dmat->segments;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	while (buflen > 0) {
		curaddr = buf;
		sgsize = MIN(buflen, dmat->maxsegsz);
		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			sgsize = MIN(sgsize, PAGE_SIZE);
			curaddr = add_bounce_page(dmat, map, 0, curaddr,
			    sgsize);
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		buf += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		_bus_dmamap_unload(dmat, map);
		return (EFBIG); /* XXX better return value here? */
	}
	return (0);
}

int
_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{

	return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
	    segs, segp));
}

/*
 * Utility function to load a linear buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct pmap *pmap, int flags, bus_dma_segment_t *segs,
    int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	struct sync_list *sl;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int error = 0;


	if (segs == NULL)
		segs = dmat->segments;
	if ((flags & BUS_DMA_LOAD_MBUF) != 0)
		map->flags |= DMAMAP_CACHE_ALIGNED;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}
	CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
	    "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);

	while (buflen > 0) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Don't support checking for coherent mappings
		 * XXX in user address space.
		 */
		KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
		curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (sgsize > dmat->maxsegsz)
			sgsize = dmat->maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;

		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
			    sgsize);
		} else {
			sl = &map->slist[map->sync_count - 1];
			if (map->sync_count == 0 ||
			    vaddr != sl->vaddr + sl->datacount) {
				if (++map->sync_count > dmat->nsegments)
					goto cleanup;
				sl++;
				sl->vaddr = vaddr;
				sl->datacount = sgsize;
				sl->busaddr = curaddr;
			} else
				sl->datacount += sgsize;
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		vaddr += sgsize;
		buflen -= sgsize;
	}

cleanup:
	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		_bus_dmamap_unload(dmat, map);
		error = EFBIG; /* XXX better return value here? */
	}
	return (error);
}

void
__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
{

	KASSERT(dmat != NULL, ("dmatag is NULL"));
	KASSERT(map != NULL, ("dmamap is NULL"));
	map->mem = *mem;
	map->callback = callback;
	map->callback_arg = callback_arg;
}

bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, int error)
{

	if (segs == NULL)
		segs = dmat->segments;
	return (segs);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
	map->sync_count = 0;
	return;
}

static void
bus_dmamap_sync_buf(vm_offset_t buf, int len, bus_dmasync_op_t op, int aligned)
{
	char tmp_cl[mips_pdcache_linesize], tmp_clend[mips_pdcache_linesize];
	vm_offset_t buf_cl, buf_clend;
	vm_size_t size_cl, size_clend;
	int cache_linesize_mask = mips_pdcache_linesize - 1;

	/*
	 * dcache invalidation operates on cache line aligned addresses
	 * and could modify areas of memory that share the same cache line
	 * at the beginning and the ending of the buffer. In order to
	 * prevent data loss we save these chunks in a temporary buffer
	 * before invalidation and restore them after it.
	 *
	 * If the aligned flag is set, the buffer is either an mbuf or came
	 * from our allocator caches.  In both cases it is always sized and
	 * aligned to cacheline boundaries, so we can skip preserving nearby
	 * data even if a transfer appears to overlap cachelines.  An mbuf in
	 * particular will usually appear to be overlapped because of
	 * offsetting within the buffer to align the L3 headers, but we know
	 * that the bytes preceding that offset are part of the same mbuf
	 * memory and are not unrelated adjacent data (and a rule of mbuf
	 * handling is that the cpu is not allowed to touch the mbuf while
	 * dma is in progress, including header fields).
	 */
	if (aligned) {
		size_cl = 0;
		size_clend = 0;
	} else {
		buf_cl = buf & ~cache_linesize_mask;
		size_cl = buf & cache_linesize_mask;
		buf_clend = buf + len;
		size_clend = (mips_pdcache_linesize -
		    (buf_clend & cache_linesize_mask)) & cache_linesize_mask;
	}

	switch (op) {
	case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
	case BUS_DMASYNC_POSTREAD:

		/*
		 * Save buffers that might be modified by invalidation
		 */
		if (size_cl)
			memcpy (tmp_cl, (void*)buf_cl, size_cl);
		if (size_clend)
			memcpy (tmp_clend, (void*)buf_clend, size_clend);
		mips_dcache_inv_range(buf, len);
		/*
		 * Restore them
		 */
		if (size_cl)
			memcpy ((void*)buf_cl, tmp_cl, size_cl);
		if (size_clend)
			memcpy ((void*)buf_clend, tmp_clend, size_clend);
		/*
		 * Copies above have brought corresponding memory
		 * cache lines back into dirty state. Write them back
		 * out and invalidate affected cache lines again if
		 * necessary.
		 */
		if (size_cl)
			mips_dcache_wbinv_range(buf_cl, size_cl);
		if (size_clend && (size_cl == 0 ||
		    buf_clend - buf_cl > mips_pdcache_linesize))
			mips_dcache_wbinv_range(buf_clend, size_clend);
		break;

	case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
		mips_dcache_wbinv_range(buf, len);
		break;

	case BUS_DMASYNC_PREREAD:
		/*
		 * Save buffers that might be modified by invalidation
		 */
		if (size_cl)
			memcpy (tmp_cl, (void *)buf_cl, size_cl);
		if (size_clend)
			memcpy (tmp_clend, (void *)buf_clend, size_clend);
		mips_dcache_inv_range(buf, len);
		/*
		 * Restore them
		 */
		if (size_cl)
			memcpy ((void *)buf_cl, tmp_cl, size_cl);
		if (size_clend)
			memcpy ((void *)buf_clend, tmp_clend, size_clend);
		/*
		 * Copies above have brought corresponding memory
		 * cache lines back into dirty state. Write them back
		 * out and invalidate affected cache lines again if
		 * necessary.
		 */
		if (size_cl)
			mips_dcache_wbinv_range(buf_cl, size_cl);
		if (size_clend && (size_cl == 0 ||
		    buf_clend - buf_cl > mips_pdcache_linesize))
			mips_dcache_wbinv_range(buf_clend, size_clend);
		break;

	case BUS_DMASYNC_PREWRITE:
		mips_dcache_wb_range(buf, len);
		break;
	}
}

static void
_bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	STAILQ_FOREACH(bpage, &map->bpages, links) {
		if (op & BUS_DMASYNC_PREWRITE) {
			if (bpage->datavaddr != 0)
				bcopy((void *)bpage->datavaddr,
				    (void *)(bpage->vaddr_nocache != 0 ?
					     bpage->vaddr_nocache :
					     bpage->vaddr),
				    bpage->datacount);
			else
				physcopyout(bpage->dataaddr,
				    (void *)(bpage->vaddr_nocache != 0 ?
					     bpage->vaddr_nocache :
					     bpage->vaddr),
				    bpage->datacount);
			if (bpage->vaddr_nocache == 0) {
				mips_dcache_wb_range(bpage->vaddr,
				    bpage->datacount);
			}
			dmat->bounce_zone->total_bounced++;
		}
		if (op & BUS_DMASYNC_POSTREAD) {
			if (bpage->vaddr_nocache == 0) {
				mips_dcache_inv_range(bpage->vaddr,
				    bpage->datacount);
			}
			if (bpage->datavaddr != 0)
				bcopy((void *)(bpage->vaddr_nocache != 0 ?
				    bpage->vaddr_nocache : bpage->vaddr),
				    (void *)bpage->datavaddr, bpage->datacount);
			else
				physcopyin((void *)(bpage->vaddr_nocache != 0 ?
				    bpage->vaddr_nocache : bpage->vaddr),
				    bpage->dataaddr, bpage->datacount);
			dmat->bounce_zone->total_bounced++;
		}
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct sync_list *sl, *end;
	int aligned;

	if (op == BUS_DMASYNC_POSTWRITE)
		return;
	if (STAILQ_FIRST(&map->bpages))
		_bus_dmamap_sync_bp(dmat, map, op);

	if ((dmat->flags & BUS_DMA_COHERENT) ||
	    (map->flags & DMAMAP_UNCACHEABLE)) {
		if (op & BUS_DMASYNC_PREWRITE)
			mips_sync();
		return;
	}

	aligned = (map->flags & DMAMAP_CACHE_ALIGNED) ? 1 : 0;

	CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
	if (map->sync_count) {
		end = &map->slist[map->sync_count];
		for (sl = &map->slist[0]; sl != end; sl++)
			bus_dmamap_sync_buf(sl->vaddr, sl->datacount, op,
			    aligned);
	}
}
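
/*
 * Usage sketch (illustrative; the driver names are hypothetical): for a
 * buffer the device reads (a transmit buffer) the driver brackets the DMA
 * like this, so CPU writes reach memory before the device starts and stale
 * cache lines are not reused afterwards:
 *
 *	bus_dmamap_sync(sc->buf_dmat, sc->buf_map, BUS_DMASYNC_PREWRITE);
 *	... start the hardware and wait for completion ...
 *	bus_dmamap_sync(sc->buf_dmat, sc->buf_map, BUS_DMASYNC_POSTWRITE);
 *
 * For a buffer the device writes (a receive buffer) the pair is
 * BUS_DMASYNC_PREREAD / BUS_DMASYNC_POSTREAD, which is what drives the
 * invalidation and bounce-page copy-back handled above.
 */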

static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		 && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_BUSDMA,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
	bz->map_count = 0;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, "");

	return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_BUSDMA,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_BOUNCE,
							 M_NOWAIT, 0ul,
							 bz->lowaddr,
							 PAGE_SIZE,
							 0);
		if (bpage->vaddr == 0) {
			free(bpage, M_BUSDMA);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		bpage->vaddr_nocache =
		    (vm_offset_t)pmap_mapdev(bpage->busaddr, PAGE_SIZE);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_addr_t addr, bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
		bpage->vaddr |= addr & PAGE_MASK;
		bpage->busaddr |= addr & PAGE_MASK;
	}
	bpage->datavaddr = vaddr;
	bpage->dataaddr = addr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;
	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * Reset the bounce page to start at offset 0.  Other uses
		 * of this bounce page may need to store a full page of
		 * data and/or assume it starts on a page boundary.
		 */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
	}

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
		    map->callback_arg, BUS_DMA_WAITOK);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}