busdma_machdep-v4.c revision 246881
/*-
 * Copyright (c) 2012 Ian Lepore
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 2002 Peter Grehan
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *   From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep.c 246881 2013-02-16 20:43:16Z ian $");

/*
 * ARM bus dma support routines.
 *
 * XXX Things to investigate / fix some day...
 *  - What is the earliest that this API can be called?  Could there be any
 *    fallout from changing the SYSINIT() order from SI_SUB_VM to SI_SUB_KMEM?
 *  - The manpage mentions the BUS_DMA_NOWAIT flag only in the context of the
 *    bus_dmamap_load() function.  This code has historically honored it (and
 *    still does) in bus_dmamem_alloc().  If we got rid of that, we could lose
 *    some error checking because some resource management calls would become
 *    WAITOK and thus "cannot fail."
 *  - The decisions made by _bus_dma_can_bounce() should be made once, at tag
 *    creation time, and the result stored in the tag.
 *  - It should be possible to take some shortcuts when mapping a buffer we
 *    know came from the uma(9) allocators, based on what we know about such
 *    buffers (aligned, contiguous, etc).
 *  - The allocation of bounce pages could probably be cleaned up, and then we
 *    could retire arm_remap_nocache().
 */

#define _ARM32_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/busdma_bufalloc.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>

#define MAX_BPAGES 64
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t		parent;
	bus_size_t		alignment;
	bus_addr_t		boundary;
	bus_addr_t		lowaddr;
	bus_addr_t		highaddr;
	bus_dma_filter_t	*filter;
	void			*filterarg;
	bus_size_t		maxsize;
	u_int			nsegments;
	bus_size_t		maxsegsz;
	int			flags;
	int			ref_count;
	int			map_count;
	bus_dma_lock_t		*lockfunc;
	void			*lockfuncarg;
	/*
	 * DMA range for this tag.  If the page doesn't fall within
	 * one of these ranges, an error is returned.  The caller
	 * may then decide what to do with the transfer.  If the
	 * range pointer is NULL, it is ignored.
	 */
	struct arm32_dma_range	*ranges;
	int			_nranges;
	struct bounce_zone *bounce_zone;
	/*
	 * Most tags need one or two segments, and can use the local tagsegs
	 * array.  For tags with a larger limit, we'll allocate a bigger array
	 * on first use.
	 */
	bus_dma_segment_t	*segments;
	bus_dma_segment_t	tagsegs[2];
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	vm_offset_t	vaddr_nocache;	/* kva of bounce buffer uncached */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_addr_t	dataaddr;	/* client physical address */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

struct sync_list {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	bus_size_t	datacount;	/* client data count */
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		map_count;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
	   "Total bounce pages");

#define DMAMAP_COHERENT		0x8
#define DMAMAP_CACHE_ALIGNED	0x10

struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	struct memdesc	mem;
	int		flags;
	STAILQ_ENTRY(bus_dmamap) links;
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	int		       sync_count;
	struct sync_list       *slist;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static struct mtx busdma_mtx;

MTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF);

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				  vm_offset_t vaddr, bus_addr_t addr,
				  bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);

/* Default tag, as most drivers provide no parent tag. */
bus_dma_tag_t arm_root_dma_tag;

/*
 * ----------------------------------------------------------------------------
 * Begin block of code useful to transplant to other implementations.
 */

static uma_zone_t dmamap_zone;	/* Cache of struct bus_dmamap items */

static busdma_bufalloc_t coherent_allocator;	/* Cache of coherent buffers */
static busdma_bufalloc_t standard_allocator;	/* Cache of standard buffers */

/*
 * This is the ctor function passed to uma_zcreate() for the pool of dma maps.
 * It'll need platform-specific changes if this code is copied.
 */
static int
dmamap_ctor(void *mem, int size, void *arg, int flags)
{
	bus_dmamap_t map;
	bus_dma_tag_t dmat;

	map = (bus_dmamap_t)mem;
	dmat = (bus_dma_tag_t)arg;

	dmat->map_count++;

	map->dmat = dmat;
	map->flags = 0;
	STAILQ_INIT(&map->bpages);

	return (0);
}

/*
 * This is the dtor function passed to uma_zcreate() for the pool of dma maps.
 * It may need platform-specific changes if this code is copied.
 */
static void
dmamap_dtor(void *mem, int size, void *arg)
{
	bus_dmamap_t map;

	map = (bus_dmamap_t)mem;

	map->dmat->map_count--;
}

static void
busdma_init(void *dummy)
{

	/* Create a cache of maps for bus_dmamap_create(). */
	dmamap_zone = uma_zcreate("dma maps", sizeof(struct bus_dmamap),
	    dmamap_ctor, dmamap_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);

	/* Create a cache of buffers in standard (cacheable) memory. */
	standard_allocator = busdma_bufalloc_create("buffer",
	    arm_dcache_align,	/* minimum_alignment */
	    NULL,		/* uma_alloc func */
	    NULL,		/* uma_free func */
	    0);			/* uma_zcreate_flags */

	/*
	 * Create a cache of buffers in uncacheable memory, to implement the
	 * BUS_DMA_COHERENT (and potentially BUS_DMA_NOCACHE) flag.
	 */
	coherent_allocator = busdma_bufalloc_create("coherent",
	    arm_dcache_align,	/* minimum_alignment */
	    busdma_bufalloc_alloc_uncacheable,
	    busdma_bufalloc_free_uncacheable,
	    0);			/* uma_zcreate_flags */
}

/*
 * This init historically used SI_SUB_VM, but now the init code requires
 * malloc(9) using M_DEVBUF memory, which is set up later than SI_SUB_VM, by
 * SI_SUB_KMEM and SI_ORDER_SECOND, so we'll go right after that by using
 * SI_SUB_KMEM and SI_ORDER_THIRD.
 */
SYSINIT(busdma, SI_SUB_KMEM, SI_ORDER_THIRD, busdma_init, NULL);

/*
 * End block of code useful to transplant to other implementations.
 * ----------------------------------------------------------------------------
 */

/*
 * Return true if a match is made.
 *
 * To find a match, walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag, call the filter callback to
 * check for a match; if there is no filter callback, assume a match.
 */
static int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		 || ((paddr & (dmat->alignment - 1)) != 0))
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

/*
 * This routine checks the exclusion zone constraints from a tag against the
 * physical RAM available on the machine.  If a tag specifies an exclusion zone
 * but there's no RAM in that zone, then we avoid allocating resources to bounce
 * a request, and we can use any memory allocator (as opposed to needing
 * kmem_alloc_contig() just because it can allocate pages in an address range).
 *
 * Most tags have BUS_SPACE_MAXADDR or BUS_SPACE_MAXADDR_32BIT (they are the
 * same value on 32-bit architectures) as their lowaddr constraint, and we can't
 * possibly have RAM at an address higher than the highest address we can
 * express, so we take a fast out.
 */
static __inline int
_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
{
	int i;

	if (lowaddr >= BUS_SPACE_MAXADDR)
		return (0);

	for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
		if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1])
		    || (lowaddr < phys_avail[i] &&
		    highaddr > phys_avail[i]))
			return (1);
	}
	return (0);
}

static __inline struct arm32_dma_range *
_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	int i;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (curaddr >= dr->dr_sysbase &&
		    round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
			return (dr);
	}

	return (NULL);
}
/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
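
/*
 * A minimal sketch of the locking contract (the softc mutex below is
 * hypothetical, not part of this file): a driver that passes
 * busdma_lock_mutex and its own mutex at tag creation time can rely on
 * deferred load callbacks being invoked from busdma_swi() with that mutex
 * held:
 *
 *	mtx_init(&sc->sc_mtx, "xxx dma", NULL, MTX_DEF);
 *	... pass busdma_lock_mutex and &sc->sc_mtx as the lockfunc and
 *	    lockfuncarg arguments of bus_dma_tag_create() ...
 */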

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
	panic("driver error: busdma dflt_lock called");
#else
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}

/*
 * Allocate a device specific dma_tag.
 */
#define SEG_NB 1024

int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_addr_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;
	/* Return a NULL tag on failure */
	*dmat = NULL;
	if (!parent)
		parent = arm_root_dma_tag;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment ? alignment : 1;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	newtag->ranges = bus_dma_get_range();
	newtag->_nranges = bus_dma_get_range_nb();
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	/*
	 * If all the segments we need fit into the local tagsegs array, set the
	 * pointer now.  Otherwise NULL the pointer and an array of segments
	 * will be allocated later, on first use.  We don't pre-allocate now
	 * because some tags exist just to pass constraints to children in the
	 * device hierarchy, and they tend to use BUS_SPACE_UNRESTRICTED and we
	 * sure don't want to try to allocate an array for that.
	 */
	if (newtag->nsegments <= nitems(newtag->tagsegs))
		newtag->segments = newtag->tagsegs;
	else
		newtag->segments = NULL;
	/*
	 * Take into account any restrictions imposed by our parent tag
	 */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);
		if ((newtag->filter != NULL) ||
		    ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
			newtag->flags |= BUS_DMA_COULD_BOUNCE;
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}
	if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr)
	 || newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	} else
		newtag->bounce_zone = NULL;
	if (error != 0)
		free(newtag, M_DEVBUF);
	else
		*dmat = newtag;
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);

	return (error);
}
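
/*
 * A typical driver call is sketched below; the parameter values and softc
 * fields (sc->sc_mtx, sc->sc_dtag) are hypothetical, chosen only to show the
 * argument order: parent, alignment, boundary, lowaddr, highaddr, filter,
 * filterarg, maxsize, nsegments, maxsegsz, flags, lockfunc, lockfuncarg, dmat.
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    MCLBYTES, 1, MCLBYTES, 0, busdma_lock_mutex, &sc->sc_mtx,
 *	    &sc->sc_dtag);
 *	if (error != 0)
 *		return (error);
 */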

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
#ifdef KTR
	bus_dma_tag_t dmat_copy = dmat;
#endif

	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL &&
				    dmat->segments != dmat->tagsegs)
					free(dmat->segments, M_DEVBUF);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy);

	return (0);
}

#include <sys/kdb.h>
/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	struct sync_list *slist;
	bus_dmamap_t map;
	int error = 0;

	slist = malloc(sizeof(*slist) * dmat->nsegments, M_DEVBUF, M_NOWAIT);
	if (slist == NULL)
		return (ENOMEM);

	map = uma_zalloc_arg(dmamap_zone, dmat, M_NOWAIT);
	*mapp = map;
	if (map == NULL) {
		free(slist, M_DEVBUF);
		return (ENOMEM);
	}

	/*
	 * If the tag's segments haven't been allocated yet we need to do it
	 * now, because we can't sleep for resources at map load time.
	 */
	if (dmat->segments == NULL) {
		dmat->segments = malloc(dmat->nsegments *
		    sizeof(*dmat->segments), M_DEVBUF, M_NOWAIT);
		if (dmat->segments == NULL) {
			free(slist, M_DEVBUF);
			uma_zfree(dmamap_zone, map);
			*mapp = NULL;
			return (ENOMEM);
		}
	}

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0) {
				free(slist, M_DEVBUF);
				uma_zfree(dmamap_zone, map);
				*mapp = NULL;
				return (error);
			}
		}
		bz = dmat->bounce_zone;

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MAX_BPAGES;
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
		bz->map_count++;
	}
	map->sync_count = 0;
	map->slist = slist;
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);

	return (0);
}
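
/*
 * A minimal load/unload sketch using such a map (the names below are
 * hypothetical and error handling is omitted); the callback receives the bus
 * addresses once the mapping is complete, possibly deferred if bounce pages
 * were temporarily exhausted:
 *
 *	static void
 *	xxx_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		struct xxx_softc *sc = arg;
 *
 *		if (error == 0)
 *			sc->sc_busaddr = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_create(sc->sc_dtag, 0, &sc->sc_map);
 *	error = bus_dmamap_load(sc->sc_dtag, sc->sc_map, buf, buflen,
 *	    xxx_load_cb, sc, BUS_DMA_NOWAIT);
 *	...
 *	bus_dmamap_unload(sc->sc_dtag, sc->sc_map);
 *	bus_dmamap_destroy(sc->sc_dtag, sc->sc_map);
 */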

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, EBUSY);
		return (EBUSY);
	}
	free(map->slist, M_DEVBUF);
	uma_zfree(dmamap_zone, map);
	if (dmat->bounce_zone)
		dmat->bounce_zone->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into bus device
 * space based on the constraints listed in the dma tag.  Returns a pointer to
 * the allocated memory, and a pointer to an associated bus_dmamap.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddrp, int flags,
                 bus_dmamap_t *mapp)
{
	struct sync_list *slist;
	void *vaddr;
	struct busdma_bufzone *bufzone;
	busdma_bufalloc_t ba;
	bus_dmamap_t map;
	int mflags;
	vm_memattr_t memattr;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	/*
	 * If the tag's segments haven't been allocated yet we need to do it
	 * now, because we can't sleep for resources at map load time.
	 */
	if (dmat->segments == NULL)
		dmat->segments = malloc(dmat->nsegments *
		   sizeof(*dmat->segments), M_DEVBUF, mflags);

	slist = malloc(sizeof(*slist) * dmat->nsegments, M_DEVBUF, M_NOWAIT);
	if (slist == NULL)
		return (ENOMEM);
	map = uma_zalloc_arg(dmamap_zone, dmat, mflags);
	if (map == NULL) {
		free(slist, M_DEVBUF);
		return (ENOMEM);
	}
	if (flags & BUS_DMA_COHERENT) {
		memattr = VM_MEMATTR_UNCACHEABLE;
		ba = coherent_allocator;
		map->flags |= DMAMAP_COHERENT;
	} else {
		memattr = VM_MEMATTR_DEFAULT;
		ba = standard_allocator;
	}
	/* All buffers we allocate are cache-aligned. */
	map->flags |= DMAMAP_CACHE_ALIGNED;

	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	/*
	 * Try to find a bufzone in the allocator that holds a cache of buffers
	 * of the right size for this request.  If the buffer is too big to be
	 * held in the allocator cache, this returns NULL.
	 */
	bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);

	/*
	 * Allocate the buffer from the uma(9) allocator if...
	 *  - It's small enough to be in the allocator (bufzone not NULL).
	 *  - The alignment constraint isn't larger than the allocation size
	 *    (the allocator aligns buffers to their size boundaries).
	 *  - There's no need to handle lowaddr/highaddr exclusion zones.
	 * else allocate non-contiguous pages if...
	 *  - The page count that could get allocated doesn't exceed nsegments.
	 *  - The alignment constraint isn't larger than a page boundary.
	 *  - There are no boundary-crossing constraints.
	 * else allocate a block of contiguous pages because one or more of the
	 * constraints is something that only the contig allocator can fulfill.
	 */
	if (bufzone != NULL && dmat->alignment <= bufzone->size &&
	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) {
		vaddr = uma_zalloc(bufzone->umazone, mflags);
	} else if (dmat->nsegments >= btoc(dmat->maxsize) &&
	    dmat->alignment <= PAGE_SIZE && dmat->boundary == 0) {
		vaddr = (void *)kmem_alloc_attr(kernel_map, dmat->maxsize,
		    mflags, 0, dmat->lowaddr, memattr);
	} else {
		vaddr = (void *)kmem_alloc_contig(kernel_map, dmat->maxsize,
		    mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,
		    memattr);
	}
	if (vaddr == NULL) {
		free(slist, M_DEVBUF);
		uma_zfree(dmamap_zone, map);
		map = NULL;
	} else {
		map->slist = slist;
		map->sync_count = 0;
	}
	*vaddrp = vaddr;
	*mapp = map;

	return (vaddr == NULL ? ENOMEM : 0);
}
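
/*
 * A minimal sketch for a descriptor ring (the sizes and softc fields are
 * hypothetical): a tag whose maxsize covers the whole ring, followed by a
 * coherent, zeroed allocation; the map returned here is later handed to
 * bus_dmamap_load() to learn the ring's bus address:
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev), PAGE_SIZE, 0,
 *	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    XXX_RING_SIZE, 1, XXX_RING_SIZE, 0, NULL, NULL, &sc->ring_dtag);
 *	error = bus_dmamem_alloc(sc->ring_dtag, (void **)&sc->ring,
 *	    BUS_DMA_COHERENT | BUS_DMA_ZERO | BUS_DMA_WAITOK, &sc->ring_map);
 *	...
 *	bus_dmamem_free(sc->ring_dtag, sc->ring, sc->ring_map);
 */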

/*
 * Free a piece of memory that was allocated via bus_dmamem_alloc, along with
 * its associated map.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	struct busdma_bufzone *bufzone;
	busdma_bufalloc_t ba;

	if (map->flags & DMAMAP_COHERENT)
		ba = coherent_allocator;
	else
		ba = standard_allocator;

	free(map->slist, M_DEVBUF);
	uma_zfree(dmamap_zone, map);
	/* Be careful not to access map from here on. */

	bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);

	if (bufzone != NULL && dmat->alignment <= bufzone->size &&
	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
		uma_zfree(bufzone->umazone, vaddr);
	else
		kmem_free(kernel_map, (vm_offset_t)vaddr, dmat->maxsize);
}

static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;

	if ((map->pagesneeded == 0)) {
		CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
		    dmat->lowaddr, dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		curaddr = buf;
		while (buflen != 0) {
			sgsize = MIN(buflen, dmat->maxsegsz);
			if (run_filter(dmat, curaddr) != 0) {
				sgsize = MIN(sgsize, PAGE_SIZE);
				map->pagesneeded++;
			}
			curaddr += sgsize;
			buflen -= sgsize;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if ((map->pagesneeded == 0)) {
		CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
		    dmat->lowaddr, dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			if (__predict_true(pmap == kernel_pmap))
				paddr = pmap_kextract(vaddr);
			else
				paddr = pmap_extract(pmap, vaddr);
			if (run_filter(dmat, paddr) != 0)
				map->pagesneeded++;
			vaddr += PAGE_SIZE;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{

	/* Reserve Necessary Bounce Pages */
	mtx_lock(&bounce_lock);
	if (flags & BUS_DMA_NOWAIT) {
		if (reserve_bounce_pages(dmat, map, 0) != 0) {
			mtx_unlock(&bounce_lock);
			return (ENOMEM);
		}
	} else {
		if (reserve_bounce_pages(dmat, map, 1) != 0) {
			/* Queue us for resources */
			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
			mtx_unlock(&bounce_lock);
			return (EINPROGRESS);
		}
	}
	mtx_unlock(&bounce_lock);

	return (0);
}

/*
 * Add a single contiguous physical range to the segment list.
 */
static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
	bus_addr_t baddr, bmask;
	int seg;

	/*
	 * Make sure we don't cross any boundaries.
	 */
	bmask = ~(dmat->boundary - 1);
	if (dmat->boundary > 0) {
		baddr = (curaddr + dmat->boundary) & bmask;
		if (sgsize > (baddr - curaddr))
			sgsize = (baddr - curaddr);
	}
	if (dmat->ranges) {
		struct arm32_dma_range *dr;

		dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
		    curaddr);
		if (dr == NULL)
			return (0);
		/*
		 * In a valid DMA range.  Translate the physical
		 * memory address to an address in the DMA window.
		 */
		curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;

	}

	seg = *segp;
	/*
	 * Insert chunk into a segment, coalescing with
	 * the previous segment if possible.
	 */
	if (seg >= 0 &&
	    curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
	    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
	    (dmat->boundary == 0 ||
	     (segs[seg].ds_addr & bmask) == (curaddr & bmask))) {
		segs[seg].ds_len += sgsize;
	} else {
		if (++seg >= dmat->nsegments)
			return (0);
		segs[seg].ds_addr = curaddr;
		segs[seg].ds_len = sgsize;
	}
	*segp = seg;
	return (sgsize);
}

/*
 * Utility function to load a physical buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	int error;

	if (segs == NULL)
		segs = dmat->segments;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	while (buflen > 0) {
		curaddr = buf;
		sgsize = MIN(buflen, dmat->maxsegsz);
		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			sgsize = MIN(sgsize, PAGE_SIZE);
			curaddr = add_bounce_page(dmat, map, 0, curaddr,
			    sgsize);
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		buf += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		_bus_dmamap_unload(dmat, map);
		return (EFBIG); /* XXX better return value here? */
	}
	return (0);
}
/*
 * Utility function to load a linear buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct pmap *pmap, int flags, bus_dma_segment_t *segs,
    int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	struct sync_list *sl;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int error = 0;

	if (segs == NULL)
		segs = dmat->segments;
	if ((flags & BUS_DMA_LOAD_MBUF) != 0)
		map->flags |= DMAMAP_CACHE_ALIGNED;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}
	CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
	    "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);

	while (buflen > 0) {
		/*
		 * Get the physical address for this segment.
		 */
		if (__predict_true(pmap == kernel_pmap)) {
			curaddr = pmap_kextract(vaddr);
		} else {
			curaddr = pmap_extract(pmap, vaddr);
			map->flags &= ~DMAMAP_COHERENT;
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (sgsize > dmat->maxsegsz)
			sgsize = dmat->maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;

		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
			    sgsize);
		} else {
			sl = &map->slist[map->sync_count - 1];
			if (map->sync_count == 0 ||
			    vaddr != sl->vaddr + sl->datacount) {
				if (++map->sync_count > dmat->nsegments)
					goto cleanup;
				sl++;
				sl->vaddr = vaddr;
				sl->datacount = sgsize;
				sl->busaddr = curaddr;
			} else
				sl->datacount += sgsize;
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		vaddr += sgsize;
		buflen -= sgsize;
	}

cleanup:
	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		_bus_dmamap_unload(dmat, map);
		return (EFBIG); /* XXX better return value here? */
	}
	return (0);
}

void
__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct memdesc *mem, bus_dmamap_callback_t *callback,
		    void *callback_arg)
{

	KASSERT(dmat != NULL, ("dmatag is NULL"));
	KASSERT(map != NULL, ("dmamap is NULL"));
	map->mem = *mem;
	map->callback = callback;
	map->callback_arg = callback_arg;
}

bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
		     bus_dma_segment_t *segs, int nsegs, int error)
{

	if (segs == NULL)
		segs = dmat->segments;
	return (segs);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
	map->sync_count = 0;
	return;
}

static void
bus_dmamap_sync_buf(vm_offset_t buf, int len, bus_dmasync_op_t op,
    int bufaligned)
{
	char _tmp_cl[arm_dcache_align], _tmp_clend[arm_dcache_align];
	register_t s;
	int partial;

	if ((op & BUS_DMASYNC_PREWRITE) && !(op & BUS_DMASYNC_PREREAD)) {
		cpu_dcache_wb_range(buf, len);
		cpu_l2cache_wb_range(buf, len);
	}

	/*
	 * If the caller promises the buffer is properly aligned to a cache line
	 * (even if the call parms make it look like it isn't) we can avoid
	 * attempting to preserve the non-DMA part of the cache line in the
	 * POSTREAD case, but we MUST still do a writeback in the PREREAD case.
	 *
	 * This covers the case of mbufs, where we know how they're aligned and
	 * know the CPU doesn't touch the header in front of the DMA data area
	 * during the IO, but it may have touched it right before invoking the
	 * sync, so a PREREAD writeback is required.
	 *
	 * It also handles buffers we created in bus_dmamem_alloc(), which are
	 * always aligned and padded to cache line size even if the IO length
	 * isn't a multiple of cache line size.  In this case the PREREAD
	 * writeback probably isn't required, but it's harmless.
	 */
	partial = (((vm_offset_t)buf) | len) & arm_dcache_align_mask;

	if (op & BUS_DMASYNC_PREREAD) {
		if (!(op & BUS_DMASYNC_PREWRITE) && !partial) {
			cpu_dcache_inv_range(buf, len);
			cpu_l2cache_inv_range(buf, len);
		} else {
			cpu_dcache_wbinv_range(buf, len);
			cpu_l2cache_wbinv_range(buf, len);
		}
	}
	if (op & BUS_DMASYNC_POSTREAD) {
		if (partial && !bufaligned) {
			s = intr_disable();
			if (buf & arm_dcache_align_mask)
				memcpy(_tmp_cl, (void *)(buf &
				    ~arm_dcache_align_mask),
				    buf & arm_dcache_align_mask);
			if ((buf + len) & arm_dcache_align_mask)
				memcpy(_tmp_clend,
				    (void *)(buf + len),
				    arm_dcache_align -
				    ((buf + len) & arm_dcache_align_mask));
		}
		cpu_dcache_inv_range(buf, len);
		cpu_l2cache_inv_range(buf, len);
		if (partial && !bufaligned) {
			if (buf & arm_dcache_align_mask)
				memcpy((void *)(buf &
				    ~arm_dcache_align_mask), _tmp_cl,
				    buf & arm_dcache_align_mask);
			if ((buf + len) & arm_dcache_align_mask)
				memcpy((void *)(buf + len),
				    _tmp_clend, arm_dcache_align -
				    ((buf + len) & arm_dcache_align_mask));
			intr_restore(s);
		}
	}
}

static void
_bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	STAILQ_FOREACH(bpage, &map->bpages, links) {
		if (op & BUS_DMASYNC_PREWRITE) {
			if (bpage->datavaddr != 0)
				bcopy((void *)bpage->datavaddr,
				    (void *)(bpage->vaddr_nocache != 0 ?
					     bpage->vaddr_nocache :
					     bpage->vaddr),
				    bpage->datacount);
			else
				physcopyout(bpage->dataaddr,
				    (void *)(bpage->vaddr_nocache != 0 ?
					     bpage->vaddr_nocache :
					     bpage->vaddr),
				    bpage->datacount);
			if (bpage->vaddr_nocache == 0) {
				cpu_dcache_wb_range(bpage->vaddr,
				    bpage->datacount);
				cpu_l2cache_wb_range(bpage->vaddr,
				    bpage->datacount);
			}
			dmat->bounce_zone->total_bounced++;
		}
		if (op & BUS_DMASYNC_POSTREAD) {
			if (bpage->vaddr_nocache == 0) {
				cpu_dcache_inv_range(bpage->vaddr,
				    bpage->datacount);
				cpu_l2cache_inv_range(bpage->vaddr,
				    bpage->datacount);
			}
			if (bpage->datavaddr != 0)
				bcopy((void *)(bpage->vaddr_nocache != 0 ?
				    bpage->vaddr_nocache : bpage->vaddr),
				    (void *)bpage->datavaddr, bpage->datacount);
			else
				physcopyin((void *)(bpage->vaddr_nocache != 0 ?
				    bpage->vaddr_nocache : bpage->vaddr),
				    bpage->dataaddr, bpage->datacount);
			dmat->bounce_zone->total_bounced++;
		}
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct sync_list *sl, *end;
	int bufaligned;

	if (op == BUS_DMASYNC_POSTWRITE)
		return;
	if (map->flags & DMAMAP_COHERENT)
		goto drain;
	if (STAILQ_FIRST(&map->bpages))
		_bus_dmamap_sync_bp(dmat, map, op);
	CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
	bufaligned = (map->flags & DMAMAP_CACHE_ALIGNED);
	if (map->sync_count) {
		end = &map->slist[map->sync_count];
		for (sl = &map->slist[0]; sl != end; sl++)
			bus_dmamap_sync_buf(sl->vaddr, sl->datacount, op,
			    bufaligned);
	}

drain:

	cpu_drain_writebuf();
}
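
/*
 * A minimal sketch of the sync discipline a driver follows (names are
 * hypothetical): sync with the PRE op after filling the buffer and before
 * starting the DMA, and with the POST op after the device signals completion
 * and before the CPU touches the data again:
 *
 *	bus_dmamap_sync(sc->sc_dtag, sc->sc_map, BUS_DMASYNC_PREWRITE);
 *	... start memory-to-device DMA, wait for the completion interrupt ...
 *	bus_dmamap_sync(sc->sc_dtag, sc->sc_map, BUS_DMASYNC_POSTWRITE);
 *
 *	bus_dmamap_sync(sc->sc_dtag, sc->sc_map, BUS_DMASYNC_PREREAD);
 *	... start device-to-memory DMA, wait for the completion interrupt ...
 *	bus_dmamap_sync(sc->sc_dtag, sc->sc_map, BUS_DMASYNC_POSTREAD);
 */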

static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		 && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
	bz->map_count = 0;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");

	return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 bz->lowaddr,
							 PAGE_SIZE,
							 0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		bpage->vaddr_nocache = (vm_offset_t)arm_remap_nocache(
		    (void *)bpage->vaddr, PAGE_SIZE);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_addr_t addr, bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have any pages reserved");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
		bpage->vaddr |= vaddr & PAGE_MASK;
		bpage->busaddr |= vaddr & PAGE_MASK;
	}
	bpage->datavaddr = vaddr;
	bpage->dataaddr = addr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;
	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * Reset the bounce page to start at offset 0.  Other uses
		 * of this bounce page may need to store a full page of
		 * data and/or assume it starts on a page boundary.
		 */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
	}

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load_mem(map->dmat, map, &map->mem,
		    map->callback, map->callback_arg, BUS_DMA_WAITOK);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}
1517