/*-
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 2002 Peter Grehan
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *   From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep.c 185494 2008-11-30 22:58:27Z stas $");

/*
 * ARM bus dma support routines
 */

#define _ARM32_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>

#define MAX_BPAGES 64
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t		parent;
	bus_size_t		alignment;
	bus_size_t		boundary;
	bus_addr_t		lowaddr;
	bus_addr_t		highaddr;
	bus_dma_filter_t	*filter;
	void			*filterarg;
	bus_size_t		maxsize;
	u_int			nsegments;
	bus_size_t		maxsegsz;
	int			flags;
	int			ref_count;
	int			map_count;
	bus_dma_lock_t		*lockfunc;
	void			*lockfuncarg;
	/*
	 * DMA range for this tag.  If the page doesn't fall within
	 * one of these ranges, an error is returned.  The caller
	 * may then decide what to do with the transfer.  If the
	 * range pointer is NULL, it is ignored.
	 */
	struct arm32_dma_range	*ranges;
	int			_nranges;
	struct bounce_zone	*bounce_zone;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	vm_offset_t	vaddr_nocache;	/* kva of bounce buffer uncached */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
	   "Total bounce pages");

#define DMAMAP_LINEAR		0x1
#define DMAMAP_MBUF		0x2
#define DMAMAP_UIO		0x4
#define DMAMAP_COHERENT		0x8
#define DMAMAP_ALLOCATED	0x10
#define DMAMAP_TYPE_MASK	(DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)

struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	int		flags;
	void		*buffer;
	void		*origbuffer;
	void		*allocbuffer;
	TAILQ_ENTRY(bus_dmamap)	freelist;
	int		len;
	STAILQ_ENTRY(bus_dmamap) links;
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static TAILQ_HEAD(,bus_dmamap) dmamap_freelist =
	TAILQ_HEAD_INITIALIZER(dmamap_freelist);

#define BUSDMA_STATIC_MAPS	500
static struct bus_dmamap map_pool[BUSDMA_STATIC_MAPS];

static struct mtx busdma_mtx;

MTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF);

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);

/* Default tag, as most drivers provide no parent tag. */
bus_dma_tag_t arm_root_dma_tag;

/*
 * Return true if a match is made.
 *
 * To find a match, walk the chain of bus_dma_tag_t's looking at 'paddr'.
 *
 * If paddr is within the bounds of the dma tag, call the filter callback
 * to check for a match; if there is no filter callback, assume a match.
 */
static int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		 || ((paddr & (dmat->alignment - 1)) != 0))
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

static void
arm_dmamap_freelist_init(void *dummy)
{
	int i;

	for (i = 0; i < BUSDMA_STATIC_MAPS; i++)
		TAILQ_INSERT_HEAD(&dmamap_freelist, &map_pool[i], freelist);
}

SYSINIT(busdma, SI_SUB_VM, SI_ORDER_ANY, arm_dmamap_freelist_init, NULL);

static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp);

static __inline int
_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
{
	int i;

	for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
		if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1])
		    || (lowaddr < phys_avail[i] &&
		    highaddr > phys_avail[i]))
			return (1);
	}
	return (0);
}

/*
 * Check to see if the specified page is in an allowed DMA range.
 */
static __inline struct arm32_dma_range *
_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	int i;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (curaddr >= dr->dr_sysbase &&
		    round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
			return (dr);
	}

	return (NULL);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should pass &Giant as the lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
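
/*
 * Illustrative sketch (not compiled): a driver that wants deferred load
 * callbacks to run under its own mutex passes busdma_lock_mutex, along
 * with that mutex, when creating its tag.  The softc "sc" and its
 * sc_mtx/sc_dtag fields are hypothetical names used only here:
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    MCLBYTES, 1, MCLBYTES, 0, busdma_lock_mutex, &sc->sc_mtx,
 *	    &sc->sc_dtag);
 */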

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
	panic("driver error: busdma dflt_lock called");
#else
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}

static __inline bus_dmamap_t
_busdma_alloc_dmamap(void)
{
	bus_dmamap_t map;

	mtx_lock(&busdma_mtx);
	map = TAILQ_FIRST(&dmamap_freelist);
	if (map)
		TAILQ_REMOVE(&dmamap_freelist, map, freelist);
	mtx_unlock(&busdma_mtx);
	if (!map) {
		map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT | M_ZERO);
		if (map)
			map->flags = DMAMAP_ALLOCATED;
	} else
		map->flags = 0;
	STAILQ_INIT(&map->bpages);
	return (map);
}

static __inline void
_busdma_free_dmamap(bus_dmamap_t map)
{
	if (map->flags & DMAMAP_ALLOCATED)
		free(map, M_DEVBUF);
	else {
		mtx_lock(&busdma_mtx);
		TAILQ_INSERT_HEAD(&dmamap_freelist, map, freelist);
		mtx_unlock(&busdma_mtx);
	}
}

/*
 * Allocate a device specific dma_tag.
 */
#define SEG_NB 1024

int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;
	if (!parent)
		parent = arm_root_dma_tag;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	newtag->ranges = bus_dma_get_range();
	newtag->_nranges = bus_dma_get_range_nb();
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	/*
	 * Take into account any restrictions imposed by our parent tag.
	 */
	if (parent != NULL) {
		newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = max(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = min(parent->boundary,
					       newtag->boundary);
		if ((newtag->filter != NULL) ||
		    ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
			newtag->flags |= BUS_DMA_COULD_BOUNCE;
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information.
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}
	if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr)
	 || newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	} else
		newtag->bounce_zone = NULL;
	if (error != 0)
		free(newtag, M_DEVBUF);
	else
		*dmat = newtag;
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);

	return (error);
}
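
/*
 * Illustrative sketch (not compiled): creating a tag for a 1 KB
 * descriptor ring that must be 256-byte aligned, stay below 4 GB, and
 * never cross a 64 KB boundary.  Restrictions from the parent tag are
 * merged in by the code above, so a child tag can only narrow them.
 * The softc "sc" and its ring_dtag field are hypothetical:
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev), 256, 0x10000,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    1024, 1, 1024, 0, busdma_lock_mutex, &Giant, &sc->ring_dtag);
 */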

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
#ifdef KTR
	bus_dma_tag_t dmat_copy = dmat;
#endif

	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy);

	return (0);
}

#include <sys/kdb.h>

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap;
	int error = 0;

	newmap = _busdma_alloc_dmamap();
	if (newmap == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
		return (ENOMEM);
	}
	*mapp = newmap;
	newmap->dmat = dmat;
	newmap->allocbuffer = NULL;
	dmat->map_count++;

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0) {
				_busdma_free_dmamap(newmap);
				*mapp = NULL;
				dmat->map_count--;
				return (error);
			}
		}
		bz = dmat->bounce_zone;

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MAX_BPAGES;
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	}
	if (error != 0) {
		_busdma_free_dmamap(newmap);
		*mapp = NULL;
		dmat->map_count--;
	}
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);

	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	if (STAILQ_FIRST(&map->bpages) != NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, EBUSY);
		return (EBUSY);
	}
	_busdma_free_dmamap(map);
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
                 bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap = NULL;
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	newmap = _busdma_alloc_dmamap();
	if (newmap == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	}
	dmat->map_count++;
	*mapp = newmap;
	newmap->dmat = dmat;

	if (dmat->maxsize <= PAGE_SIZE &&
	   (dmat->alignment < dmat->maxsize) &&
	   !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL) {
		_busdma_free_dmamap(newmap);
		dmat->map_count--;
		*mapp = NULL;
		return (ENOMEM);
	}
	if (flags & BUS_DMA_COHERENT) {
		void *tmpaddr = arm_remap_nocache(
		    (void *)((vm_offset_t)*vaddr &~ PAGE_MASK),
		    dmat->maxsize + ((vm_offset_t)*vaddr & PAGE_MASK));

		if (tmpaddr) {
			tmpaddr = (void *)((vm_offset_t)(tmpaddr) +
			    ((vm_offset_t)*vaddr & PAGE_MASK));
			newmap->origbuffer = *vaddr;
			newmap->allocbuffer = tmpaddr;
			cpu_idcache_wbinv_range((vm_offset_t)*vaddr,
			    dmat->maxsize);
			cpu_l2cache_wbinv_range((vm_offset_t)*vaddr,
			    dmat->maxsize);
			*vaddr = tmpaddr;
		} else
			newmap->origbuffer = newmap->allocbuffer = NULL;
	} else
		newmap->origbuffer = newmap->allocbuffer = NULL;
	return (0);
}
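
/*
 * Illustrative sketch (not compiled): allocating zeroed, cache-coherent
 * ring memory with the tag created earlier, and the matching release.
 * The softc "sc" and its fields are hypothetical:
 *
 *	error = bus_dmamem_alloc(sc->ring_dtag, &sc->ring_va,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
 *	    &sc->ring_map);
 *	...
 *	bus_dmamem_free(sc->ring_dtag, sc->ring_va, sc->ring_map);
 */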

/*
 * Free a piece of memory and its allocated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	if (map->allocbuffer) {
		KASSERT(map->allocbuffer == vaddr,
		    ("Trying to free the wrong DMA buffer"));
		vaddr = map->origbuffer;
		arm_unmap_nocache(map->allocbuffer, dmat->maxsize);
	}
	if (dmat->maxsize <= PAGE_SIZE &&
	   dmat->alignment < dmat->maxsize &&
	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
	dmat->map_count--;
	_busdma_free_dmamap(map);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

static int
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
			bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if (map->pagesneeded == 0) {
		CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
		    dmat->lowaddr, dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
			    run_filter(dmat, paddr) != 0)
				map->pagesneeded++;
			vaddr += PAGE_SIZE;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	return (0);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	int error = 0;
	pd_entry_t *pde;
	pt_entry_t pte;
	pt_entry_t *ptep;

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		error = _bus_dmamap_count_pages(dmat, map, buf, buflen, flags);
		if (error)
			return (error);
	}
	CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
	    "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Don't support checking for coherent mappings
		 * XXX in user address space.
		 */
		if (__predict_true(pmap == pmap_kernel())) {
			if (pmap_get_pde_pte(pmap, vaddr, &pde, &ptep) == FALSE)
				return (EFAULT);

			if (__predict_false(pmap_pde_section(pde))) {
				if (*pde & L1_S_SUPERSEC)
					curaddr = (*pde & L1_SUP_FRAME) |
					    (vaddr & L1_SUP_OFFSET);
				else
					curaddr = (*pde & L1_S_FRAME) |
					    (vaddr & L1_S_OFFSET);
				if (*pde & L1_S_CACHE_MASK) {
					map->flags &=
					    ~DMAMAP_COHERENT;
				}
			} else {
				pte = *ptep;
				KASSERT((pte & L2_TYPE_MASK) != L2_TYPE_INV,
				    ("INV type"));
				if (__predict_false((pte & L2_TYPE_MASK)
						    == L2_TYPE_L)) {
					curaddr = (pte & L2_L_FRAME) |
					    (vaddr & L2_L_OFFSET);
					if (pte & L2_L_CACHE_MASK) {
						map->flags &=
						    ~DMAMAP_COHERENT;
					}
				} else {
					curaddr = (pte & L2_S_FRAME) |
					    (vaddr & L2_S_OFFSET);
					if (pte & L2_S_CACHE_MASK) {
						map->flags &=
						    ~DMAMAP_COHERENT;
					}
				}
			}
		} else {
			curaddr = pmap_extract(pmap, vaddr);
			map->flags &= ~DMAMAP_COHERENT;
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (sgsize > dmat->maxsegsz)
			sgsize = dmat->maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}
		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr))
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

		if (dmat->ranges) {
			struct arm32_dma_range *dr;

			dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
			    curaddr);
			if (dr == NULL)
				return (EINVAL);
			/*
			 * In a valid DMA range.  Translate the physical
			 * memory address to an address in the DMA window.
			 */
			curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (seg >= 0 && curaddr == lastaddr &&
		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
		    (dmat->boundary == 0 ||
		     (segs[seg].ds_addr & bmask) ==
		     (curaddr & bmask))) {
			segs[seg].ds_len += sgsize;
			goto segdone;
		} else {
			if (++seg >= dmat->nsegments)
				break;
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
		if (error)
			break;
segdone:
		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		error = EFBIG; /* XXX better return value here? */
	return (error);
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                bus_size_t buflen, bus_dmamap_callback_t *callback,
                void *callback_arg, int flags)
{
	vm_offset_t	lastaddr = 0;
	int		error, nsegs = -1;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif

	KASSERT(dmat != NULL, ("dmatag is NULL"));
	KASSERT(map != NULL, ("dmamap is NULL"));
	map->callback = callback;
	map->callback_arg = callback_arg;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_LINEAR|DMAMAP_COHERENT;
	map->buffer = buf;
	map->len = buflen;
	error = bus_dmamap_load_buffer(dmat,
	    dm_segments, map, buf, buflen, kernel_pmap,
	    flags, &lastaddr, &nsegs);
	if (error == EINPROGRESS)
		return (error);
	if (error)
		(*callback)(callback_arg, NULL, 0, error);
	else
		(*callback)(callback_arg, dm_segments, nsegs + 1, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, nsegs + 1, error);

	return (error);
}
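
/*
 * Illustrative sketch (not compiled): loading the ring buffer allocated
 * above and recording its bus address.  With BUS_DMA_NOWAIT the load is
 * never deferred, so the callback has run by the time bus_dmamap_load()
 * returns.  "ring_cb" and the softc fields are hypothetical:
 *
 *	static void
 *	ring_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		bus_addr_t *paddr = arg;
 *
 *		if (error == 0)
 *			*paddr = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_load(sc->ring_dtag, sc->ring_map, sc->ring_va,
 *	    1024, ring_cb, &sc->ring_pa, BUS_DMA_NOWAIT);
 */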

/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs = -1, error = 0;

	M_ASSERTPKTHDR(m0);

	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
	map->buffer = m0;
	map->len = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat,
				    dm_segments, map, m->m_data, m->m_len,
				    pmap_kernel(), flags, &lastaddr, &nsegs);
				map->len += m->m_len;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs + 1,
		    m0->m_pkthdr.len, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);

	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			int flags)
{
	int error = 0;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	*nsegs = -1;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
	map->buffer = m0;
	map->len = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat, segs, map,
						m->m_data, m->m_len,
						pmap_kernel(), flags, &lastaddr,
						nsegs);
				map->len += m->m_len;
			}
		}
	} else {
		error = EINVAL;
	}

	/* XXX FIXME: Having to increment nsegs is really annoying */
	++*nsegs;
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, *nsegs);
	return (error);
}
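
/*
 * Illustrative sketch (not compiled): the usual transmit-path use of the
 * scatter/gather variant, which fills a caller-provided segment array
 * instead of invoking a callback.  "sc", "txr" and TX_MAX_SEGS are
 * hypothetical:
 *
 *	bus_dma_segment_t segs[TX_MAX_SEGS];
 *	int nsegs, error;
 *
 *	error = bus_dmamap_load_mbuf_sg(sc->tx_dtag, txr->map, m0,
 *	    segs, &nsegs, BUS_DMA_NOWAIT);
 *	if (error == 0) {
 *		(write nsegs hardware descriptors from segs[])
 *		bus_dmamap_sync(sc->tx_dtag, txr->map,
 *		    BUS_DMASYNC_PREWRITE);
 *	}
 */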

/*
 * Like bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
	vm_offset_t lastaddr = 0;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, i, error;
	bus_size_t resid;
	struct iovec *iov;
	struct pmap *pmap;

	resid = uio->uio_resid;
	iov = uio->uio_iov;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_UIO|DMAMAP_COHERENT;
	map->buffer = uio;
	map->len = 0;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = kernel_pmap;

	error = 0;
	nsegs = -1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
		    resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = bus_dmamap_load_buffer(dmat, dm_segments, map,
			    addr, minlen, pmap, flags, &lastaddr, &nsegs);

			map->len += minlen;
			resid -= minlen;
		}
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs+1,
		    uio->uio_resid, error);
	}

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	map->flags &= ~DMAMAP_TYPE_MASK;
	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

static void
bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
{
	char _tmp_cl[arm_dcache_align], _tmp_clend[arm_dcache_align];

	if ((op & BUS_DMASYNC_PREWRITE) && !(op & BUS_DMASYNC_PREREAD)) {
		cpu_dcache_wb_range((vm_offset_t)buf, len);
		cpu_l2cache_wb_range((vm_offset_t)buf, len);
	}
	if (op & BUS_DMASYNC_PREREAD) {
		if (!(op & BUS_DMASYNC_PREWRITE) &&
		    ((((vm_offset_t)(buf) | len) & arm_dcache_align_mask) == 0)) {
			cpu_dcache_inv_range((vm_offset_t)buf, len);
			cpu_l2cache_inv_range((vm_offset_t)buf, len);
		} else {
			cpu_dcache_wbinv_range((vm_offset_t)buf, len);
			cpu_l2cache_wbinv_range((vm_offset_t)buf, len);
		}
	}
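	/*
	 * POSTREAD invalidates whole cache lines, but the buffer may share
	 * its first and last lines with unrelated data.  Save the bytes of
	 * those lines that fall outside the buffer, invalidate, then copy
	 * the saved bytes back.
	 */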
	if (op & BUS_DMASYNC_POSTREAD) {
		if ((vm_offset_t)buf & arm_dcache_align_mask) {
			memcpy(_tmp_cl, (void *)((vm_offset_t)buf &
			    ~arm_dcache_align_mask),
			    (vm_offset_t)buf & arm_dcache_align_mask);
		}
		if (((vm_offset_t)buf + len) & arm_dcache_align_mask) {
			memcpy(_tmp_clend, (void *)((vm_offset_t)buf + len),
			    arm_dcache_align - (((vm_offset_t)(buf) + len) &
			    arm_dcache_align_mask));
		}
		cpu_dcache_inv_range((vm_offset_t)buf, len);
		cpu_l2cache_inv_range((vm_offset_t)buf, len);

		if ((vm_offset_t)buf & arm_dcache_align_mask)
			memcpy((void *)((vm_offset_t)buf &
			    ~arm_dcache_align_mask), _tmp_cl,
			    (vm_offset_t)buf & arm_dcache_align_mask);
		if (((vm_offset_t)buf + len) & arm_dcache_align_mask)
			memcpy((void *)((vm_offset_t)buf + len), _tmp_clend,
			    arm_dcache_align - (((vm_offset_t)(buf) + len) &
			    arm_dcache_align_mask));
	}
}

static void
_bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	STAILQ_FOREACH(bpage, &map->bpages, links) {
		if (op & BUS_DMASYNC_PREWRITE) {
			bcopy((void *)bpage->datavaddr,
			    (void *)(bpage->vaddr_nocache != 0 ?
				     bpage->vaddr_nocache : bpage->vaddr),
			    bpage->datacount);
			if (bpage->vaddr_nocache == 0) {
				cpu_dcache_wb_range(bpage->vaddr,
				    bpage->datacount);
				cpu_l2cache_wb_range(bpage->vaddr,
				    bpage->datacount);
			}
		}
		if (op & BUS_DMASYNC_POSTREAD) {
			if (bpage->vaddr_nocache == 0) {
				cpu_dcache_inv_range(bpage->vaddr,
				    bpage->datacount);
				cpu_l2cache_inv_range(bpage->vaddr,
				    bpage->datacount);
			}
			bcopy((void *)(bpage->vaddr_nocache != 0 ?
			    bpage->vaddr_nocache : bpage->vaddr),
			    (void *)bpage->datavaddr, bpage->datacount);
		}
	}
}

static __inline int
_bus_dma_buf_is_in_bp(bus_dmamap_t map, void *buf, int len)
{
	struct bounce_page *bpage;

	STAILQ_FOREACH(bpage, &map->bpages, links) {
		if ((vm_offset_t)buf >= bpage->datavaddr &&
		    (vm_offset_t)buf + len < bpage->datavaddr +
		    bpage->datacount)
			return (1);
	}
	return (0);
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct mbuf *m;
	struct uio *uio;
	int resid;
	struct iovec *iov;

	if (op == BUS_DMASYNC_POSTWRITE)
		return;
	if (STAILQ_FIRST(&map->bpages))
		_bus_dmamap_sync_bp(dmat, map, op);
	if (map->flags & DMAMAP_COHERENT)
		return;
	CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
	switch (map->flags & DMAMAP_TYPE_MASK) {
	case DMAMAP_LINEAR:
		if (!(_bus_dma_buf_is_in_bp(map, map->buffer, map->len)))
			bus_dmamap_sync_buf(map->buffer, map->len, op);
		break;
	case DMAMAP_MBUF:
		m = map->buffer;
		while (m) {
			if (m->m_len > 0 &&
			    !(_bus_dma_buf_is_in_bp(map, m->m_data, m->m_len)))
				bus_dmamap_sync_buf(m->m_data, m->m_len, op);
			m = m->m_next;
		}
		break;
	case DMAMAP_UIO:
		uio = map->buffer;
		iov = uio->uio_iov;
		resid = uio->uio_resid;
		for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
			bus_size_t minlen = resid < iov[i].iov_len ? resid :
			    iov[i].iov_len;
			if (minlen > 0) {
				if (!_bus_dma_buf_is_in_bp(map, iov[i].iov_base,
				    minlen))
					bus_dmamap_sync_buf(iov[i].iov_base,
					    minlen, op);
				resid -= minlen;
			}
		}
		break;
	default:
		break;
	}
	cpu_drain_writebuf();
}
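
/*
 * Illustrative sketch (not compiled): the canonical bracketing of a
 * device-to-host transfer with sync operations.  "sc" and "rx_map" are
 * hypothetical:
 *
 *	bus_dmamap_sync(sc->rx_dtag, rx_map, BUS_DMASYNC_PREREAD);
 *	(start the DMA and wait for it to complete)
 *	bus_dmamap_sync(sc->rx_dtag, rx_map, BUS_DMASYNC_POSTREAD);
 */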

static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		 && (dmat->boundary <= bz->boundary)
		 && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = dmat->alignment;
	bz->boundary = dmat->boundary;
	snprintf(bz->zoneid, sizeof(bz->zoneid), "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, sizeof(bz->lowaddrid), "%#jx",
	    (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "boundary", CTLFLAG_RD, &bz->boundary, 0, "");

	return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 bz->lowaddr,
							 PAGE_SIZE,
							 bz->boundary);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		bpage->vaddr_nocache = (vm_offset_t)arm_remap_nocache(
		    (void *)bpage->vaddr, PAGE_SIZE);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have reserved pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load(map->dmat, map, map->buffer, map->len,
		    map->callback, map->callback_arg, /*flags*/0);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}