/*-
 * Copyright (c) 2012 Ian Lepore
 * Copyright (c) 2010 Mark Tinguely
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 2002 Peter Grehan
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *  From i386/busdma_machdep.c 191438 2009-04-23 20:24:19Z jhb
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep-v6.c 269214 2014-07-29 02:37:24Z ian $");

#define _ARM32_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/kdb.h>
#include <ddb/ddb.h>
#include <ddb/db_output.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/busdma_bufalloc.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/memdesc.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>

#define MAX_BPAGES 64
#define BUS_DMA_EXCL_BOUNCE	BUS_DMA_BUS2
#define BUS_DMA_ALIGN_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_COULD_BOUNCE	(BUS_DMA_EXCL_BOUNCE | BUS_DMA_ALIGN_BOUNCE)
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  alignment;
	bus_size_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	u_int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;
	int		  map_count;
	bus_dma_lock_t	 *lockfunc;
	void		 *lockfuncarg;
	struct bounce_zone *bounce_zone;
	/*
	 * DMA range for this tag.  If the page doesn't fall within
	 * one of these ranges, an error is returned.  The caller
	 * may then decide what to do with the transfer.  If the
	 * range pointer is NULL, it is ignored.
	 */
	struct arm32_dma_range	*ranges;
	int			_nranges;
	/*
	 * Most tags need one or two segments, and can use the local tagsegs
	 * array.  For tags with a larger limit, we'll allocate a bigger array
	 * on first use.
	 */
	bus_dma_segment_t	*segments;
	bus_dma_segment_t	tagsegs[2];
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_addr_t	dataaddr;	/* client physical address */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

struct sync_list {
	vm_offset_t	vaddr;		/* kva of client data */
	bus_addr_t	busaddr;	/* physical address of client data */
	bus_size_t	datacount;	/* client data count */
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		map_count;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
	   "Total bounce pages");

struct bus_dmamap {
	struct bp_list	       bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	struct memdesc	       mem;
	pmap_t		       pmap;
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	int		      flags;
#define DMAMAP_COHERENT		(1 << 0)
#define DMAMAP_DMAMEM_ALLOC	(1 << 1)
#define DMAMAP_MBUF		(1 << 2)
	STAILQ_ENTRY(bus_dmamap) links;
	int		       sync_count;
	struct sync_list       slist[];
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				  vm_offset_t vaddr, bus_addr_t addr,
				  bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    void *buf, bus_size_t buflen, int flags);
static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t buf, bus_size_t buflen, int flags);
static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    int flags);

static busdma_bufalloc_t coherent_allocator;	/* Cache of coherent buffers */
static busdma_bufalloc_t standard_allocator;	/* Cache of standard buffers */

static void
busdma_init(void *dummy)
{
	int uma_flags;

	uma_flags = 0;

	/* Create a cache of buffers in standard (cacheable) memory. */
	standard_allocator = busdma_bufalloc_create("buffer",
	    arm_dcache_align,	/* minimum_alignment */
	    NULL,		/* uma_alloc func */
	    NULL,		/* uma_free func */
	    uma_flags);		/* uma_zcreate_flags */

#ifdef INVARIANTS
	/*
	 * Force the UMA zone to allocate service structures like slabs
	 * using its own allocator.  The uma_debug code performs atomic ops
	 * on uma_slab_t fields, and the safety of those operations is not
	 * guaranteed for write-back caches.
	 */
	uma_flags = UMA_ZONE_OFFPAGE;
#endif
	/*
	 * Create a cache of buffers in uncacheable memory, to implement the
	 * BUS_DMA_COHERENT (and potentially BUS_DMA_NOCACHE) flag.
	 */
	coherent_allocator = busdma_bufalloc_create("coherent",
	    arm_dcache_align,	/* minimum_alignment */
	    busdma_bufalloc_alloc_uncacheable,
	    busdma_bufalloc_free_uncacheable,
	    uma_flags);		/* uma_zcreate_flags */
}

/*
 * This init historically used SI_SUB_VM, but now the init code requires
 * malloc(9) using M_DEVBUF memory, which is set up later than SI_SUB_VM, by
 * SI_SUB_KMEM and SI_ORDER_THIRD, so we'll go right after that by using
 * SI_SUB_KMEM and SI_ORDER_FOURTH.
 */
SYSINIT(busdma, SI_SUB_KMEM, SI_ORDER_FOURTH, busdma_init, NULL);

/*
 * This routine checks the exclusion zone constraints from a tag against the
 * physical RAM available on the machine.  If a tag specifies an exclusion zone
 * but there's no RAM in that zone, then we avoid allocating resources to bounce
 * a request, and we can use any memory allocator (as opposed to needing
 * kmem_alloc_contig() just because it can allocate pages in an address range).
 *
 * Most tags have BUS_SPACE_MAXADDR or BUS_SPACE_MAXADDR_32BIT (they are the
 * same value on 32-bit architectures) as their lowaddr constraint, and we can't
 * possibly have RAM at an address higher than the highest address we can
 * express, so we take a fast out.
 */
static int
exclusion_bounce_check(vm_offset_t lowaddr, vm_offset_t highaddr)
{
	int i;

	if (lowaddr >= BUS_SPACE_MAXADDR)
		return (0);

	for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
		if ((lowaddr >= phys_avail[i] && lowaddr < phys_avail[i + 1]) ||
		    (lowaddr < phys_avail[i] && highaddr >= phys_avail[i]))
			return (1);
	}
	return (0);
}

/*
 * Return true if the tag has an exclusion zone that could lead to bouncing.
 */
static __inline int
exclusion_bounce(bus_dma_tag_t dmat)
{

	return (dmat->flags & BUS_DMA_EXCL_BOUNCE);
}

/*
 * Return true if the given address does not fall on the alignment boundary.
 */
static __inline int
alignment_bounce(bus_dma_tag_t dmat, bus_addr_t addr)
{

	return (addr & (dmat->alignment - 1));
}

/*
 * Return true if the DMA should bounce because the start or end does not fall
 * on a cacheline boundary (which would require a partial cacheline flush).
 * COHERENT memory doesn't trigger cacheline flushes.  Memory allocated by
 * bus_dmamem_alloc() is always aligned to cacheline boundaries, and there's a
 * strict rule that such memory cannot be accessed by the CPU while DMA is in
 * progress (or by multiple DMA engines at once), so that it's always safe to do
 * full cacheline flushes even if that affects memory outside the range of a
 * given DMA operation that doesn't involve the full allocated buffer.  If we're
 * mapping an mbuf, that follows the same rules as a buffer we allocated.
 */
static __inline int
cacheline_bounce(bus_dmamap_t map, bus_addr_t addr, bus_size_t size)
{

	if (map->flags & (DMAMAP_DMAMEM_ALLOC | DMAMAP_COHERENT | DMAMAP_MBUF))
		return (0);
	return ((addr | size) & arm_dcache_align_mask);
}

/*
 * Return true if we might need to bounce the DMA described by addr and size.
 *
 * This is used to quick-check whether we need to do the more expensive work of
 * checking the DMA page-by-page looking for alignment and exclusion bounces.
 *
 * Note that the addr argument might be either virtual or physical.  It doesn't
 * matter because we only look at the low-order bits, which are the same in both
 * address spaces.
 */
static __inline int
might_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t addr,
    bus_size_t size)
{

	return ((dmat->flags & BUS_DMA_EXCL_BOUNCE) ||
	    alignment_bounce(dmat, addr) ||
	    cacheline_bounce(map, addr, size));
}

/*
 * Return true if we must bounce the DMA described by paddr and size.
 *
 * Bouncing can be triggered by DMA that doesn't begin and end on cacheline
 * boundaries, or doesn't begin on an alignment boundary, or falls within the
 * exclusion zone of any tag in the ancestry chain.
 *
 * For exclusions, walk the chain of tags comparing paddr to the exclusion zone
 * within each tag.  If the tag has a filter function, use it to decide whether
 * the DMA needs to bounce, otherwise any DMA within the zone bounces.
 */
static int
must_bounce(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t paddr,
    bus_size_t size)
{

	if (cacheline_bounce(map, paddr, size))
		return (1);

	/*
	 * The tag already contains ancestors' alignment restrictions so this
	 * check doesn't need to be inside the loop.
	 */
	if (alignment_bounce(dmat, paddr))
		return (1);

	/*
	 * Even though each tag has an exclusion zone that is a superset of its
	 * own and all its ancestors' exclusions, the exclusion zone of each tag
	 * up the chain must be checked within the loop, because the busdma
	 * rules say the filter function is called only when the address lies
	 * within the low-highaddr range of the tag that filterfunc belongs to.
	 */
	while (dmat != NULL && exclusion_bounce(dmat)) {
		if ((paddr >= dmat->lowaddr && paddr <= dmat->highaddr) &&
		    (dmat->filter == NULL ||
		    dmat->filter(dmat->filterarg, paddr) != 0))
			return (1);
		dmat = dmat->parent;
	}

	return (0);
}

static __inline struct arm32_dma_range *
_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	int i;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (curaddr >= dr->dr_sysbase &&
		    round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
			return (dr);
	}

	return (NULL);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	panic("driver error: busdma dflt_lock called");
}

/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

#if 0
	if (!parent)
		parent = arm_root_dma_tag;
#endif

	/* Basic sanity checking */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	if (maxsegsz == 0) {
		return (EINVAL);
	}

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	newtag->ranges = bus_dma_get_range();
	newtag->_nranges = bus_dma_get_range_nb();
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	/*
	 * If all the segments we need fit into the local tagsegs array, set the
	 * pointer now.  Otherwise NULL the pointer and an array of segments
	 * will be allocated later, on first use.  We don't pre-allocate now
	 * because some tags exist just to pass constraints to children in the
	 * device hierarchy, and they tend to use BUS_SPACE_UNRESTRICTED and we
	 * sure don't want to try to allocate an array for that.
	 */
	if (newtag->nsegments <= nitems(newtag->tagsegs))
		newtag->segments = newtag->tagsegs;
	else
		newtag->segments = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		newtag->alignment = MAX(parent->alignment, newtag->alignment);
		newtag->flags |= parent->flags & BUS_DMA_COULD_BOUNCE;
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (exclusion_bounce_check(newtag->lowaddr, newtag->highaddr))
		newtag->flags |= BUS_DMA_EXCL_BOUNCE;
	if (alignment_bounce(newtag, 1))
		newtag->flags |= BUS_DMA_ALIGN_BOUNCE;

	/*
	 * Any request can auto-bounce due to cacheline alignment, in addition
	 * to any alignment or boundary specifications in the tag, so if the
	 * ALLOCNOW flag is set, there's always work to do.
	 */
	if ((flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;
		/*
		 * Round size up to a full page, and add one more page because
		 * there can always be one more boundary crossing than the
		 * number of pages in a transfer.
		 */
		maxsize = roundup2(maxsize, PAGE_SIZE) + PAGE_SIZE;

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	} else
		newtag->bounce_zone = NULL;

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
	return (error);
}
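
/*
 * Usage sketch (hypothetical, for illustration only, not part of this file's
 * API): a typical driver creates a tag constrained to 32-bit addresses with a
 * small alignment, and lets busdma use the driver's own mutex for deferred
 * callbacks via busdma_lock_mutex() above.  The softc fields and FOO_*
 * constants below are made-up names.
 *
 *	struct foo_softc {
 *		device_t	sc_dev;
 *		struct mtx	sc_mtx;
 *		bus_dma_tag_t	sc_dmat;
 *	};
 *
 *	error = bus_dma_tag_create(
 *	    bus_get_dma_tag(sc->sc_dev),	// parent: inherit bus limits
 *	    sizeof(uint32_t), 0,		// alignment, no boundary
 *	    BUS_SPACE_MAXADDR_32BIT,		// lowaddr: exclude >4GB
 *	    BUS_SPACE_MAXADDR,			// highaddr
 *	    NULL, NULL,				// no filter
 *	    FOO_MAXXFER,			// maxsize
 *	    FOO_NSEG,				// nsegments
 *	    FOO_MAXSEGSZ,			// maxsegsz
 *	    0,					// flags
 *	    busdma_lock_mutex, &sc->sc_mtx,	// deferred-callback locking
 *	    &sc->sc_dmat);
 */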

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t dmat_copy;
	int error;

	error = 0;
	dmat_copy = dmat;

	if (dmat != NULL) {

		if (dmat->map_count != 0) {
			error = EBUSY;
			goto out;
		}

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL &&
				    dmat->segments != dmat->tagsegs)
					free(dmat->segments, M_DEVBUF);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
out:
	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
	return (error);
}

static int
allocate_bz_and_pages(bus_dma_tag_t dmat, bus_dmamap_t mapp)
{
	struct bounce_zone *bz;
	int maxpages;
	int error;

	if (dmat->bounce_zone == NULL)
		if ((error = alloc_bounce_zone(dmat)) != 0)
			return (error);
	bz = dmat->bounce_zone;
	/* Initialize the new map */
	STAILQ_INIT(&(mapp->bpages));

	/*
	 * Attempt to add pages to our pool on a per-instance basis up to a sane
	 * limit.  Even if the tag isn't flagged as COULD_BOUNCE due to
	 * alignment and boundary constraints, it could still auto-bounce due to
	 * cacheline alignment, which requires at most two bounce pages.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE)
		maxpages = MAX_BPAGES;
	else
		maxpages = 2 * bz->map_count;
	if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 ||
	    (bz->map_count > 0 && bz->total_bpages < maxpages)) {
		int pages;

		pages = atop(roundup2(dmat->maxsize, PAGE_SIZE)) + 1;
		pages = MIN(maxpages - bz->total_bpages, pages);
		pages = MAX(pages, 2);
		if (alloc_bounce_pages(dmat, pages) < pages)
			return (ENOMEM);

		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0)
			dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}
	bz->map_count++;
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	bus_dmamap_t map;
	int mapsize;
	int error = 0;

	mapsize = sizeof(*map) + (sizeof(struct sync_list) * dmat->nsegments);
	*mapp = map = malloc(mapsize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (map == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
		return (ENOMEM);
	}
	map->sync_count = 0;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			free(map, M_DEVBUF);
			*mapp = NULL;
			return (ENOMEM);
		}
	}
	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	error = allocate_bz_and_pages(dmat, map);
	if (error != 0) {
		free(map, M_DEVBUF);
		*mapp = NULL;
		return (error);
	}
	return (error);
}
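
/*
 * Usage sketch (hypothetical): after creating a map, a driver loads a buffer
 * and receives the segment list in a callback.  If the load is deferred while
 * waiting for bounce pages, the callback runs later under the tag's lockfunc.
 * foo_dma_callback and the softc fields are made-up names.
 *
 *	static void
 *	foo_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg,
 *	    int error)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		if (error != 0)
 *			return;
 *		sc->sc_busaddr = segs[0].ds_addr;	// program the device
 *	}
 *
 *	error = bus_dmamap_create(sc->sc_dmat, 0, &sc->sc_map);
 *	if (error == 0)
 *		error = bus_dmamap_load(sc->sc_dmat, sc->sc_map, buf, len,
 *		    foo_dma_callback, sc, BUS_DMA_NOWAIT);
 */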

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, EBUSY);
		return (EBUSY);
	}
	if (dmat->bounce_zone)
		dmat->bounce_zone->map_count--;
	free(map, M_DEVBUF);
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	busdma_bufalloc_t ba;
	struct busdma_bufzone *bufzone;
	bus_dmamap_t map;
	vm_memattr_t memattr;
	int mflags;
	int mapsize;
	int error;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	/* ARM non-snooping caches need a map for the VA cache sync structure */

	mapsize = sizeof(*map) + (sizeof(struct sync_list) * dmat->nsegments);
	*mapp = map = malloc(mapsize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (map == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	}
	map->flags = DMAMAP_DMAMEM_ALLOC;
	map->sync_count = 0;

	/* We may need bounce pages, even for allocated memory */
	error = allocate_bz_and_pages(dmat, map);
	if (error != 0) {
		free(map, M_DEVBUF);
		*mapp = NULL;
		return (error);
	}

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    mflags);
		if (dmat->segments == NULL) {
			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
			    __func__, dmat, dmat->flags, ENOMEM);
			free(map, M_DEVBUF);
			*mapp = NULL;
			return (ENOMEM);
		}
	}

	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;
	if (flags & BUS_DMA_COHERENT) {
		memattr = VM_MEMATTR_UNCACHEABLE;
		ba = coherent_allocator;
		map->flags |= DMAMAP_COHERENT;
	} else {
		memattr = VM_MEMATTR_DEFAULT;
		ba = standard_allocator;
	}

	/*
	 * Try to find a bufzone in the allocator that holds a cache of buffers
	 * of the right size for this request.  If the buffer is too big to be
	 * held in the allocator cache, this returns NULL.
	 */
	bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);

	/*
	 * Allocate the buffer from the uma(9) allocator if...
	 *  - It's small enough to be in the allocator (bufzone not NULL).
	 *  - The alignment constraint isn't larger than the allocation size
	 *    (the allocator aligns buffers to their size boundaries).
	 *  - There's no need to handle lowaddr/highaddr exclusion zones.
	 * else allocate non-contiguous pages if...
	 *  - The page count that could get allocated doesn't exceed nsegments.
	 *  - The alignment constraint isn't larger than a page boundary.
	 *  - There are no boundary-crossing constraints.
	 * else allocate a block of contiguous pages because one or more of the
	 * constraints is something that only the contig allocator can fulfill.
	 */
	if (bufzone != NULL && dmat->alignment <= bufzone->size &&
	    !exclusion_bounce(dmat)) {
		*vaddr = uma_zalloc(bufzone->umazone, mflags);
	} else if (dmat->nsegments >= btoc(dmat->maxsize) &&
	    dmat->alignment <= PAGE_SIZE && dmat->boundary == 0) {
		*vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
		    mflags, 0, dmat->lowaddr, memattr);
	} else {
		*vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize,
		    mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,
		    memattr);
	}

	if (*vaddr == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		free(map, M_DEVBUF);
		*mapp = NULL;
		return (ENOMEM);
	}
	dmat->map_count++;

	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, 0);
	return (0);
}
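
/*
 * Usage sketch (hypothetical): allocating a coherent descriptor ring with
 * bus_dmamem_alloc() also yields the map used to load it.  BUS_DMA_COHERENT
 * selects the uncacheable allocator above, so no cache maintenance is needed
 * in bus_dmamap_sync() for this memory.  The softc fields, FOO_RING_SIZE, and
 * foo_dma_callback are made-up names.
 *
 *	error = bus_dmamem_alloc(sc->sc_ring_dmat, (void **)&sc->sc_ring,
 *	    BUS_DMA_COHERENT | BUS_DMA_ZERO | BUS_DMA_NOWAIT,
 *	    &sc->sc_ring_map);
 *	if (error == 0)
 *		error = bus_dmamap_load(sc->sc_ring_dmat, sc->sc_ring_map,
 *		    sc->sc_ring, FOO_RING_SIZE, foo_dma_callback, sc,
 *		    BUS_DMA_NOWAIT);
 *	...
 *	bus_dmamap_unload(sc->sc_ring_dmat, sc->sc_ring_map);
 *	bus_dmamem_free(sc->sc_ring_dmat, sc->sc_ring, sc->sc_ring_map);
 */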

/*
 * Free a piece of memory and its associated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	struct busdma_bufzone *bufzone;
	busdma_bufalloc_t ba;

	if (map->flags & DMAMAP_COHERENT)
		ba = coherent_allocator;
	else
		ba = standard_allocator;

	/* Be careful not to access map from here on. */

	bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);

	if (bufzone != NULL && dmat->alignment <= bufzone->size &&
	    !exclusion_bounce(dmat))
		uma_zfree(bufzone->umazone, vaddr);
	else
		kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize);

	dmat->map_count--;
	free(map, M_DEVBUF);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;

	if (map->pagesneeded == 0) {
		CTR5(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d"
		    " map= %p, pagesneeded= %d",
		    dmat->lowaddr, dmat->boundary, dmat->alignment,
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		curaddr = buf;
		while (buflen != 0) {
			sgsize = MIN(buflen, dmat->maxsegsz);
			if (must_bounce(dmat, map, curaddr, sgsize) != 0) {
				sgsize = MIN(sgsize, PAGE_SIZE);
				map->pagesneeded++;
			}
			curaddr += sgsize;
			buflen -= sgsize;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded);
	}
}

static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if (map->pagesneeded == 0) {
		CTR5(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d"
		    " map= %p, pagesneeded= %d",
		    dmat->lowaddr, dmat->boundary, dmat->alignment,
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			if (__predict_true(map->pmap == kernel_pmap))
				paddr = pmap_kextract(vaddr);
			else
				paddr = pmap_extract(map->pmap, vaddr);
			if (must_bounce(dmat, map, paddr,
			    min(vendaddr - vaddr, (PAGE_SIZE - ((vm_offset_t)vaddr &
			    PAGE_MASK)))) != 0) {
				map->pagesneeded++;
			}
			vaddr += (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK));
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d", map->pagesneeded);
	}
}

static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{

	/* Reserve Necessary Bounce Pages */
	mtx_lock(&bounce_lock);
	if (flags & BUS_DMA_NOWAIT) {
		if (reserve_bounce_pages(dmat, map, 0) != 0) {
			map->pagesneeded = 0;
			mtx_unlock(&bounce_lock);
			return (ENOMEM);
		}
	} else {
		if (reserve_bounce_pages(dmat, map, 1) != 0) {
			/* Queue us for resources */
			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
			mtx_unlock(&bounce_lock);
			return (EINPROGRESS);
		}
	}
	mtx_unlock(&bounce_lock);

	return (0);
}

/*
 * Add a single contiguous physical range to the segment list.
 */
static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
		   bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
	bus_addr_t baddr, bmask;
	int seg;

	/*
	 * Make sure we don't cross any boundaries.
	 */
	bmask = ~(dmat->boundary - 1);
	if (dmat->boundary > 0) {
		baddr = (curaddr + dmat->boundary) & bmask;
		if (sgsize > (baddr - curaddr))
			sgsize = (baddr - curaddr);
	}

	if (dmat->ranges) {
		struct arm32_dma_range *dr;

		dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
		    curaddr);
		if (dr == NULL) {
			_bus_dmamap_unload(dmat, map);
			return (0);
		}
		/*
		 * In a valid DMA range.  Translate the physical
		 * memory address to an address in the DMA window.
		 */
		curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
	}

	/*
	 * Insert chunk into a segment, coalescing with
	 * previous segment if possible.
	 */
	seg = *segp;
	if (seg == -1) {
		seg = 0;
		segs[seg].ds_addr = curaddr;
		segs[seg].ds_len = sgsize;
	} else {
		if (curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
		    (dmat->boundary == 0 ||
		     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
			segs[seg].ds_len += sgsize;
		else {
			if (++seg >= dmat->nsegments)
				return (0);
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
	}
	*segp = seg;
	return (sgsize);
}

/*
 * Utility function to load a physical buffer.  segp contains
 * the starting segment on entry, and the ending segment on exit.
 */
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat,
		      bus_dmamap_t map,
		      vm_paddr_t buf, bus_size_t buflen,
		      int flags,
		      bus_dma_segment_t *segs,
		      int *segp)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;
	int error;

	if (segs == NULL)
		segs = dmat->segments;

	if (might_bounce(dmat, map, (bus_addr_t)buf, buflen)) {
		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	while (buflen > 0) {
		curaddr = buf;
		sgsize = MIN(buflen, dmat->maxsegsz);
		if (map->pagesneeded != 0 && must_bounce(dmat, map, curaddr,
		    sgsize)) {
			sgsize = MIN(sgsize, PAGE_SIZE);
			curaddr = add_bounce_page(dmat, map, 0, curaddr,
						  sgsize);
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		buf += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		_bus_dmamap_unload(dmat, map);
		return (EFBIG); /* XXX better return value here? */
	}
	return (0);
}

int
_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{

	return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
	    segs, segp));
}

/*
 * Utility function to load a linear buffer.  segp contains
 * the starting segment on entry, and the ending segment on exit.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			pmap_t pmap,
			int flags,
			bus_dma_segment_t *segs,
			int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	vm_offset_t vaddr;
	struct sync_list *sl;
	int error;

	if (segs == NULL)
		segs = dmat->segments;

	if (flags & BUS_DMA_LOAD_MBUF)
		map->flags |= DMAMAP_MBUF;

	map->pmap = pmap;

	if (might_bounce(dmat, map, (bus_addr_t)buf, buflen)) {
		_bus_dmamap_count_pages(dmat, map, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	sl = NULL;
	vaddr = (vm_offset_t)buf;

	while (buflen > 0) {
		/*
		 * Get the physical address for this segment.
		 */
		if (__predict_true(map->pmap == kernel_pmap))
			curaddr = pmap_kextract(vaddr);
		else
			curaddr = pmap_extract(map->pmap, vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (sgsize > dmat->maxsegsz)
			sgsize = dmat->maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;

		if (map->pagesneeded != 0 && must_bounce(dmat, map, curaddr,
		    sgsize)) {
			curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
						  sgsize);
		} else {
			sl = &map->slist[map->sync_count - 1];
			if (map->sync_count == 0 ||
#ifdef ARM_L2_PIPT
			    curaddr != sl->busaddr + sl->datacount ||
#endif
			    vaddr != sl->vaddr + sl->datacount) {
				if (++map->sync_count > dmat->nsegments)
					goto cleanup;
				sl++;
				sl->vaddr = vaddr;
				sl->datacount = sgsize;
				sl->busaddr = curaddr;
			} else
				sl->datacount += sgsize;
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
					    segp);
		if (sgsize == 0)
			break;
		vaddr += sgsize;
		buflen -= sgsize;
	}

cleanup:
	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		_bus_dmamap_unload(dmat, map);
		return (EFBIG); /* XXX better return value here? */
	}
	return (0);
}

void
__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct memdesc *mem, bus_dmamap_callback_t *callback,
		    void *callback_arg)
{

	map->mem = *mem;
	map->dmat = dmat;
	map->callback = callback;
	map->callback_arg = callback_arg;
}

bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
		     bus_dma_segment_t *segs, int nsegs, int error)
{

	if (segs == NULL)
		segs = dmat->segments;
	return (segs);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;
	struct bounce_zone *bz;

	if ((bz = dmat->bounce_zone) != NULL) {
		while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
			STAILQ_REMOVE_HEAD(&map->bpages, links);
			free_bounce_page(dmat, bpage);
		}

		bz = dmat->bounce_zone;
		bz->free_bpages += map->pagesreserved;
		bz->reserved_bpages -= map->pagesreserved;
		map->pagesreserved = 0;
		map->pagesneeded = 0;
	}
	map->sync_count = 0;
	map->flags &= ~DMAMAP_MBUF;
}

#ifdef notyetbounceuser
/*
 * If busdma uses user pages, then the interrupt handler could use the
 * kernel vm mapping.  Both bounce pages and sync list entries do not
 * cross page boundaries.
 * Below is a rough sequence that a person would do to fix the user page
 * reference in the kernel vmspace.  This would be done in the dma post
 * routine.
 */
void
_bus_dmamap_fix_user(vm_offset_t buf, bus_size_t len,
			pmap_t pmap, int op)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	vm_offset_t va;

	/*
	 * Each synclist entry is contained within a single page.
	 *
	 * This would be needed if BUS_DMASYNC_POSTxxxx was implemented.
	 */
	curaddr = pmap_extract(pmap, buf);
	va = pmap_dma_map(curaddr);
	switch (op) {
	case SYNC_USER_INV:
		cpu_dcache_wb_range(va, sgsize);
		break;

	case SYNC_USER_COPYTO:
		bcopy((void *)va, (void *)bounce, sgsize);
		break;

	case SYNC_USER_COPYFROM:
		bcopy((void *) bounce, (void *)va, sgsize);
		break;

	default:
		break;
	}

	pmap_dma_unmap(va);
}
#endif

#ifdef ARM_L2_PIPT
#define l2cache_wb_range(va, pa, size) cpu_l2cache_wb_range(pa, size)
#define l2cache_wbinv_range(va, pa, size) cpu_l2cache_wbinv_range(pa, size)
#define l2cache_inv_range(va, pa, size) cpu_l2cache_inv_range(pa, size)
#else
#define l2cache_wb_range(va, pa, size) cpu_l2cache_wb_range(va, size)
#define l2cache_wbinv_range(va, pa, size) cpu_l2cache_wbinv_range(va, size)
#define l2cache_inv_range(va, pa, size) cpu_l2cache_inv_range(va, size)
#endif

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;
	struct sync_list *sl, *end;

	/*
	 * If the buffer was from user space, it is possible that this is not
	 * the same vm map, especially on a POST operation.  It's not clear that
	 * dma on userland buffers can work at all right now, certainly not if a
	 * partial cacheline flush has to be handled.  To be safe, until we're
	 * able to test direct userland dma, panic on a map mismatch.
	 */
	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		if (!pmap_dmap_iscurrent(map->pmap))
			panic("_bus_dmamap_sync: wrong user map for bounce sync.");
		/* Handle data bouncing. */
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing bounce", __func__, dmat, dmat->flags, op);

		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				if (bpage->datavaddr != 0)
					bcopy((void *)bpage->datavaddr,
					    (void *)bpage->vaddr,
					    bpage->datacount);
				else
					physcopyout(bpage->dataaddr,
					    (void *)bpage->vaddr,
					    bpage->datacount);
				cpu_dcache_wb_range((vm_offset_t)bpage->vaddr,
				    bpage->datacount);
				l2cache_wb_range((vm_offset_t)bpage->vaddr,
				    (vm_offset_t)bpage->busaddr,
				    bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}

		if (op & BUS_DMASYNC_PREREAD) {
			bpage = STAILQ_FIRST(&map->bpages);
			while (bpage != NULL) {
				cpu_dcache_inv_range((vm_offset_t)bpage->vaddr,
				    bpage->datacount);
				l2cache_inv_range((vm_offset_t)bpage->vaddr,
				    (vm_offset_t)bpage->busaddr,
				    bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}
		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				vm_offset_t startv;
				vm_paddr_t startp;
				int len;

				startv = bpage->vaddr &~ arm_dcache_align_mask;
				startp = bpage->busaddr &~ arm_dcache_align_mask;
				len = bpage->datacount;

				if (startv != bpage->vaddr)
					len += bpage->vaddr & arm_dcache_align_mask;
				if (len & arm_dcache_align_mask)
					len = (len -
					    (len & arm_dcache_align_mask)) +
					    arm_dcache_align;
				cpu_dcache_inv_range(startv, len);
				l2cache_inv_range(startv, startp, len);
				if (bpage->datavaddr != 0)
					bcopy((void *)bpage->vaddr,
					    (void *)bpage->datavaddr,
					    bpage->datacount);
				else
					physcopyin((void *)bpage->vaddr,
					    bpage->dataaddr,
					    bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}
	}
	if (map->flags & DMAMAP_COHERENT)
		return;

	if (map->sync_count != 0) {
		if (!pmap_dmap_iscurrent(map->pmap))
			panic("_bus_dmamap_sync: wrong user map for sync.");
		/* ARM caches are not self-snooping for dma */

		sl = &map->slist[0];
		end = &map->slist[map->sync_count];
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing sync", __func__, dmat, dmat->flags, op);

		switch (op) {
		case BUS_DMASYNC_PREWRITE:
			while (sl != end) {
				cpu_dcache_wb_range(sl->vaddr, sl->datacount);
				l2cache_wb_range(sl->vaddr, sl->busaddr,
				    sl->datacount);
				sl++;
			}
			break;

		case BUS_DMASYNC_PREREAD:
			while (sl != end) {
				cpu_dcache_inv_range(sl->vaddr, sl->datacount);
				l2cache_inv_range(sl->vaddr, sl->busaddr,
				    sl->datacount);
				sl++;
			}
			break;

		case BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD:
			while (sl != end) {
				cpu_dcache_wbinv_range(sl->vaddr, sl->datacount);
				l2cache_wbinv_range(sl->vaddr,
				    sl->busaddr, sl->datacount);
				sl++;
			}
			break;

		case BUS_DMASYNC_POSTREAD:
		case BUS_DMASYNC_POSTWRITE:
		case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
			break;
		default:
			panic("unsupported combination of sync operations: 0x%08x\n", op);
			break;
		}
	}
}
1404
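/*
 * Initialize the global bounce page bookkeeping (lists and mutex).  Runs
 * once at boot via SYSINIT, before any bounce zones can be created.
 */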
static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

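/*
 * Convenience accessors for a bounce zone's sysctl context and for the
 * top-level oid under hw.busdma that its statistics hang off of.
 */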
static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{

	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{

	return (bz->sysctl_tree_top);
}

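/*
 * Find an existing bounce zone that satisfies the tag's alignment and
 * lowaddr constraints, or create a new one and register its sysctl
 * statistics.  Returns 0 on success or ENOMEM.
 */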
static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment) &&
		    (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
	bz->map_count = 0;
	snprintf(bz->zoneid, sizeof(bz->zoneid), "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, sizeof(bz->lowaddrid), "%#jx",
	    (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");

	return (0);
}

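/*
 * Add up to numpages pages to the tag's bounce zone, each allocated with
 * contigmalloc() below the zone's lowaddr limit.  Returns the number of
 * pages actually allocated.
 */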
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
		    M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

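/*
 * Try to reserve the pages a map still needs from the zone's free list.
 * Returns the remaining shortfall; if commit is zero and the request
 * cannot be met in full, nothing is reserved.
 */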
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

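/*
 * Consume one of the map's reserved bounce pages to stand in for the
 * buffer at vaddr/addr, and return the bounce page's bus address for use
 * in the DMA segment.
 */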
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_addr_t addr, bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have any pages reserved");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
		bpage->vaddr |= vaddr & PAGE_MASK;
		bpage->busaddr |= vaddr & PAGE_MASK;
	}
	bpage->datavaddr = vaddr;
	bpage->dataaddr = addr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

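/*
 * Return a bounce page to its zone's free list.  If a map is waiting for
 * pages and its reservation can now be satisfied, move it to the callback
 * list and schedule the busdma software interrupt to retry its load.
 */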
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;
	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * Reset the bounce page to start at offset 0.  Other uses
		 * of this bounce page may need to store a full page of
		 * data and/or assume it starts on a page boundary.
		 */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
	}

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
			    map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

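/*
 * Software interrupt handler for deferred loads: replay each queued
 * map's bus_dmamap_load_mem() under the tag's lock function now that
 * bounce pages are available.
 */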
void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
		    map->callback_arg, BUS_DMA_WAITOK);
		dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}
