1/*-
2 * Copyright (c) 2006 Oleksandr Tymoshenko
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions, and the following disclaimer,
10 *    without modification, immediately at the beginning of the file.
11 * 2. The name of the author may not be used to endorse or promote products
12 *    derived from this software without specific prior written permission.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
18 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 *  From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
27 */
28
29#include <sys/cdefs.h>
30__FBSDID("$FreeBSD: head/sys/mips/mips/busdma_machdep.c 290219 2015-10-31 00:29:26Z adrian $");
31
32/*
33 * MIPS bus dma support routines
34 */
35
36#include <sys/param.h>
37#include <sys/systm.h>
38#include <sys/malloc.h>
39#include <sys/bus.h>
40#include <sys/busdma_bufalloc.h>
41#include <sys/interrupt.h>
42#include <sys/lock.h>
43#include <sys/proc.h>
44#include <sys/memdesc.h>
45#include <sys/mutex.h>
46#include <sys/ktr.h>
47#include <sys/kernel.h>
48#include <sys/sysctl.h>
49#include <sys/uio.h>
50
51#include <vm/uma.h>
52#include <vm/vm.h>
53#include <vm/vm_extern.h>
54#include <vm/vm_kern.h>
55#include <vm/vm_page.h>
56#include <vm/vm_map.h>
57
58#include <machine/atomic.h>
59#include <machine/bus.h>
60#include <machine/cache.h>
61#include <machine/cpufunc.h>
62#include <machine/cpuinfo.h>
63#include <machine/md_var.h>
64
65#define MAX_BPAGES 64
66#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
67#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4
68
69struct bounce_zone;
70
71struct bus_dma_tag {
72	bus_dma_tag_t		parent;
73	bus_size_t		alignment;
74	bus_addr_t		boundary;
75	bus_addr_t		lowaddr;
76	bus_addr_t		highaddr;
77	bus_dma_filter_t	*filter;
78	void			*filterarg;
79	bus_size_t		maxsize;
80	u_int			nsegments;
81	bus_size_t		maxsegsz;
82	int			flags;
83	int			ref_count;
84	int			map_count;
85	bus_dma_lock_t		*lockfunc;
86	void			*lockfuncarg;
87	bus_dma_segment_t	*segments;
88	struct bounce_zone *bounce_zone;
89};
90
91struct bounce_page {
92	vm_offset_t	vaddr;		/* kva of bounce buffer */
93	vm_offset_t	vaddr_nocache;	/* kva of bounce buffer uncached */
94	bus_addr_t	busaddr;	/* Physical address */
95	vm_offset_t	datavaddr;	/* kva of client data */
96	bus_addr_t	dataaddr;	/* client physical address */
97	bus_size_t	datacount;	/* client data count */
98	STAILQ_ENTRY(bounce_page) links;
99};
100
101struct sync_list {
102	vm_offset_t	vaddr;		/* kva of bounce buffer */
103	bus_addr_t	busaddr;	/* Physical address */
104	bus_size_t	datacount;	/* client data count */
105};
106
107int busdma_swi_pending;
108
109struct bounce_zone {
110	STAILQ_ENTRY(bounce_zone) links;
111	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
112	int		total_bpages;
113	int		free_bpages;
114	int		reserved_bpages;
115	int		active_bpages;
116	int		total_bounced;
117	int		total_deferred;
118	int		map_count;
119	bus_size_t	alignment;
120	bus_addr_t	lowaddr;
121	char		zoneid[8];
122	char		lowaddrid[20];
123	struct sysctl_ctx_list sysctl_tree;
124	struct sysctl_oid *sysctl_tree_top;
125};
126
127static struct mtx bounce_lock;
128static int total_bpages;
129static int busdma_zonecount;
130static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
131
132static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
133SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
134	   "Total bounce pages");
135
136#define DMAMAP_UNCACHEABLE	0x08
137#define DMAMAP_CACHE_ALIGNED	0x10
138
139struct bus_dmamap {
140	struct bp_list	bpages;
141	int		pagesneeded;
142	int		pagesreserved;
143	bus_dma_tag_t	dmat;
144	struct memdesc	mem;
145	int		flags;
146	void		*origbuffer;
147	void		*allocbuffer;
148	TAILQ_ENTRY(bus_dmamap)	freelist;
149	STAILQ_ENTRY(bus_dmamap) links;
150	bus_dmamap_callback_t *callback;
151	void		*callback_arg;
152	int		sync_count;
153	struct sync_list *slist;
154};
155
156static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
157static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
158
159static void init_bounce_pages(void *dummy);
160static int alloc_bounce_zone(bus_dma_tag_t dmat);
161static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
162static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
163				int commit);
164static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
165				  vm_offset_t vaddr, bus_addr_t addr,
166				  bus_size_t size);
167static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
168
169/* Default tag, as most drivers provide no parent tag. */
170bus_dma_tag_t mips_root_dma_tag;
171
172static uma_zone_t dmamap_zone;	/* Cache of struct bus_dmamap items */
173
174static busdma_bufalloc_t coherent_allocator;	/* Cache of coherent buffers */
175static busdma_bufalloc_t standard_allocator;	/* Cache of standard buffers */
176
177MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");
178MALLOC_DEFINE(M_BOUNCE, "bounce", "busdma bounce pages");
179
180/*
181 * This is the ctor function passed to uma_zcreate() for the pool of dma maps.
182 * It'll need platform-specific changes if this code is copied.
183 */
184static int
185dmamap_ctor(void *mem, int size, void *arg, int flags)
186{
187	bus_dmamap_t map;
188	bus_dma_tag_t dmat;
189
190	map = (bus_dmamap_t)mem;
191	dmat = (bus_dma_tag_t)arg;
192
193	dmat->map_count++;
194
195	map->dmat = dmat;
196	map->flags = 0;
197	map->slist = NULL;
198	map->allocbuffer = NULL;
199	map->sync_count = 0;
200	STAILQ_INIT(&map->bpages);
201
202	return (0);
203}
204
205/*
206 * This is the dtor function passed to uma_zcreate() for the pool of dma maps.
207 * It may need platform-specific changes if this code is copied.
208 */
209static void
210dmamap_dtor(void *mem, int size, void *arg)
211{
212	bus_dmamap_t map;
213
214	map = (bus_dmamap_t)mem;
215
216	map->dmat->map_count--;
217}
218
219static void
220busdma_init(void *dummy)
221{
222
223	/* Create a cache of maps for bus_dmamap_create(). */
224	dmamap_zone = uma_zcreate("dma maps", sizeof(struct bus_dmamap),
225	    dmamap_ctor, dmamap_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);
226
227	/* Create a cache of buffers in standard (cacheable) memory. */
228	standard_allocator = busdma_bufalloc_create("buffer",
229	    mips_pdcache_linesize,	/* minimum_alignment */
230	    NULL,			/* uma_alloc func */
231	    NULL,			/* uma_free func */
232	    0);				/* uma_zcreate_flags */
233
234	/*
235	 * Create a cache of buffers in uncacheable memory, to implement the
236	 * BUS_DMA_COHERENT flag.
237	 */
238	coherent_allocator = busdma_bufalloc_create("coherent",
239	    mips_pdcache_linesize,	/* minimum_alignment */
240	    busdma_bufalloc_alloc_uncacheable,
241	    busdma_bufalloc_free_uncacheable,
242	    0);				/* uma_zcreate_flags */
243}
244SYSINIT(busdma, SI_SUB_KMEM, SI_ORDER_FOURTH, busdma_init, NULL);
245
246/*
247 * Return true if a match is made.
248 *
249 * To find a match, walk the chain of bus_dma_tag_t's looking at 'paddr'.
250 *
251 * If paddr is within the exclusion window of the dma tag, call the filter
252 * callback to check for a match; if there is no filter callback, assume a match.
253 */
254static int
255run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
256{
257	int retval;
258
259	retval = 0;
260
261	do {
262		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
263		 || ((paddr & (dmat->alignment - 1)) != 0))
264		 && (dmat->filter == NULL
265		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
266			retval = 1;
267
268		dmat = dmat->parent;
269	} while (retval == 0 && dmat != NULL);
270	return (retval);
271}
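
/*
 * Illustrative example (not part of the original file): for a tag created
 * with lowaddr = 0x0fffffff, highaddr = BUS_SPACE_MAXADDR, alignment = 4 and
 * no filter, run_filter() reports a match (bounce needed) for
 * paddr = 0x10000000, which falls inside the exclusion window, and for
 * paddr = 0x00000002, which violates the alignment; paddr = 0x00000100
 * trips neither test and needs no bouncing.  The addresses are hypothetical.
 */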
272
273/*
274 * Check whether the given lowaddr/highaddr exclusion window overlaps physical memory.
275 */
276
277static __inline int
278_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
279{
280	int i;
281	for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
282		if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1])
283		    || (lowaddr < phys_avail[i] &&
284		    highaddr > phys_avail[i]))
285			return (1);
286	}
287	return (0);
288}
289
290/*
291 * Convenience function for manipulating driver locks from busdma (during
292 * busdma_swi, for example).  Drivers that don't provide their own locks
293 * should pass &Giant as dmat->lockfuncarg.  Drivers that use their own
294 * non-mutex locking scheme don't have to use this at all.
295 */
296void
297busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
298{
299	struct mtx *dmtx;
300
301	dmtx = (struct mtx *)arg;
302	switch (op) {
303	case BUS_DMA_LOCK:
304		mtx_lock(dmtx);
305		break;
306	case BUS_DMA_UNLOCK:
307		mtx_unlock(dmtx);
308		break;
309	default:
310		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
311	}
312}
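
/*
 * Usage sketch (not part of the original file; "sc" is a hypothetical
 * softc): a driver that wants its deferred-load callbacks serialized by its
 * own mutex passes this function and that mutex as the lockfunc/lockfuncarg
 * pair of bus_dma_tag_create(), e.g. "busdma_lock_mutex, &sc->sc_mtx".
 */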
313
314/*
315 * dflt_lock should never get called.  It gets put into the dma tag when
316 * lockfunc == NULL, which is only valid if the maps that are associated
317 * with the tag are never meant to be deferred.
318 * XXX Should have a way to identify which driver is responsible here.
319 */
320static void
321dflt_lock(void *arg, bus_dma_lock_op_t op)
322{
323#ifdef INVARIANTS
324	panic("driver error: busdma dflt_lock called");
325#else
326	printf("DRIVER_ERROR: busdma dflt_lock called\n");
327#endif
328}
329
330static __inline bus_dmamap_t
331_busdma_alloc_dmamap(bus_dma_tag_t dmat)
332{
333	struct sync_list *slist;
334	bus_dmamap_t map;
335
336	slist = malloc(sizeof(*slist) * dmat->nsegments, M_BUSDMA, M_NOWAIT);
337	if (slist == NULL)
338		return (NULL);
339	map = uma_zalloc_arg(dmamap_zone, dmat, M_NOWAIT);
340	if (map != NULL)
341		map->slist = slist;
342	else
343		free(slist, M_BUSDMA);
344	return (map);
345}
346
347static __inline void
348_busdma_free_dmamap(bus_dmamap_t map)
349{
350
351	free(map->slist, M_BUSDMA);
352	uma_zfree(dmamap_zone, map);
353}
354
355/*
356 * Allocate a device specific dma_tag.
357 */
358#define SEG_NB 1024
359
360int
361bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
362    bus_addr_t boundary, bus_addr_t lowaddr,
363    bus_addr_t highaddr, bus_dma_filter_t *filter,
364    void *filterarg, bus_size_t maxsize, int nsegments,
365    bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
366    void *lockfuncarg, bus_dma_tag_t *dmat)
367{
368	bus_dma_tag_t newtag;
369	int error = 0;
370	/* Return a NULL tag on failure */
371	*dmat = NULL;
372	if (!parent)
373		parent = mips_root_dma_tag;
374
375	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_BUSDMA, M_NOWAIT);
376	if (newtag == NULL) {
377		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
378		    __func__, newtag, 0, error);
379		return (ENOMEM);
380	}
381
382	newtag->parent = parent;
383	newtag->alignment = alignment;
384	newtag->boundary = boundary;
385	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
386	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
387	newtag->filter = filter;
388	newtag->filterarg = filterarg;
389	newtag->maxsize = maxsize;
390	newtag->nsegments = nsegments;
391	newtag->maxsegsz = maxsegsz;
392	newtag->flags = flags;
393	if (cpuinfo.cache_coherent_dma)
394		newtag->flags |= BUS_DMA_COHERENT;
395	newtag->ref_count = 1; /* Count ourself */
396	newtag->map_count = 0;
397	if (lockfunc != NULL) {
398		newtag->lockfunc = lockfunc;
399		newtag->lockfuncarg = lockfuncarg;
400	} else {
401		newtag->lockfunc = dflt_lock;
402		newtag->lockfuncarg = NULL;
403	}
404	newtag->segments = NULL;
405
406	/*
407	 * Take into account any restrictions imposed by our parent tag
408	 */
409	if (parent != NULL) {
410		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
411		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
412		if (newtag->boundary == 0)
413			newtag->boundary = parent->boundary;
414		else if (parent->boundary != 0)
415			newtag->boundary =
416			    MIN(parent->boundary, newtag->boundary);
417		if ((newtag->filter != NULL) ||
418		    ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
419			newtag->flags |= BUS_DMA_COULD_BOUNCE;
420		if (newtag->filter == NULL) {
421			/*
422			 * Short circuit looking at our parent directly
423			 * since we have encapsulated all of its information
424			 */
425			newtag->filter = parent->filter;
426			newtag->filterarg = parent->filterarg;
427			newtag->parent = parent->parent;
428		}
429		if (newtag->parent != NULL)
430			atomic_add_int(&parent->ref_count, 1);
431	}
432	if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr)
433	 || newtag->alignment > 1)
434		newtag->flags |= BUS_DMA_COULD_BOUNCE;
435
436	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
437	    (flags & BUS_DMA_ALLOCNOW) != 0) {
438		struct bounce_zone *bz;
439
440		/* Must bounce */
441
442		if ((error = alloc_bounce_zone(newtag)) != 0) {
443			free(newtag, M_BUSDMA);
444			return (error);
445		}
446		bz = newtag->bounce_zone;
447
448		if (ptoa(bz->total_bpages) < maxsize) {
449			int pages;
450
451			pages = atop(maxsize) - bz->total_bpages;
452
453			/* Add pages to our bounce pool */
454			if (alloc_bounce_pages(newtag, pages) < pages)
455				error = ENOMEM;
456		}
457		/* Performed initial allocation */
458		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
459	} else
460		newtag->bounce_zone = NULL;
461	if (error != 0)
462		free(newtag, M_BUSDMA);
463	else
464		*dmat = newtag;
465	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
466	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
467
468	return (error);
469}
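
/*
 * Usage sketch (illustrative only, not part of the original file; the
 * device and softc names are hypothetical): a typical driver tag for a
 * single-segment 2KB buffer restricted to 32-bit addresses might be
 * created as
 *
 *	error = bus_dma_tag_create(
 *	    bus_get_dma_tag(sc->dev),	// parent
 *	    1, 0,			// alignment, boundary
 *	    BUS_SPACE_MAXADDR_32BIT,	// lowaddr
 *	    BUS_SPACE_MAXADDR,		// highaddr
 *	    NULL, NULL,			// filter, filterarg
 *	    MCLBYTES, 1, MCLBYTES,	// maxsize, nsegments, maxsegsz
 *	    0,				// flags
 *	    busdma_lock_mutex, &sc->sc_mtx,
 *	    &sc->buf_tag);
 *
 * Physical addresses falling in the (lowaddr, highaddr] window would then
 * be bounced through the pages managed below, and the parent's own
 * restrictions are folded in as described above.
 */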
470
471int
472bus_dma_tag_destroy(bus_dma_tag_t dmat)
473{
474#ifdef KTR
475	bus_dma_tag_t dmat_copy = dmat;
476#endif
477
478	if (dmat != NULL) {
479		if (dmat->map_count != 0)
480			return (EBUSY);
481
482		while (dmat != NULL) {
483			bus_dma_tag_t parent;
484
485			parent = dmat->parent;
486			atomic_subtract_int(&dmat->ref_count, 1);
487			if (dmat->ref_count == 0) {
488				if (dmat->segments != NULL)
489					free(dmat->segments, M_BUSDMA);
490				free(dmat, M_BUSDMA);
491				/*
492				 * Last reference count, so
493				 * release our reference
494				 * count on our parent.
495				 */
496				dmat = parent;
497			} else
498				dmat = NULL;
499		}
500	}
501	CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy);
502
503	return (0);
504}
505
506#include <sys/kdb.h>
507/*
508 * Allocate a handle for mapping from kva/uva/physical
509 * address space into bus device space.
510 */
511int
512bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
513{
514	bus_dmamap_t newmap;
515	int error = 0;
516
517	if (dmat->segments == NULL) {
518		dmat->segments = (bus_dma_segment_t *)malloc(
519		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_BUSDMA,
520		    M_NOWAIT);
521		if (dmat->segments == NULL) {
522			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
523			    __func__, dmat, ENOMEM);
524			return (ENOMEM);
525		}
526	}
527
528	newmap = _busdma_alloc_dmamap(dmat);
529	if (newmap == NULL) {
530		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
531		return (ENOMEM);
532	}
533	*mapp = newmap;
534
535	/*
536	 * Bouncing might be required if the driver asks for an active
537	 * exclusion region, a data alignment that is stricter than 1, and/or
538	 * an active address boundary.
539	 */
540	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
541
542		/* Must bounce */
543		struct bounce_zone *bz;
544		int maxpages;
545
546		if (dmat->bounce_zone == NULL) {
547			if ((error = alloc_bounce_zone(dmat)) != 0) {
548				_busdma_free_dmamap(newmap);
549				*mapp = NULL;
550				return (error);
551			}
552		}
553		bz = dmat->bounce_zone;
554
555		/* Initialize the new map */
556		STAILQ_INIT(&((*mapp)->bpages));
557
558		/*
559		 * Attempt to add pages to our pool on a per-instance
560		 * basis up to a sane limit.
561		 */
562		maxpages = MAX_BPAGES;
563		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
564		 || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
565			int pages;
566
567			pages = MAX(atop(dmat->maxsize), 1);
568			pages = MIN(maxpages - bz->total_bpages, pages);
569			pages = MAX(pages, 1);
570			if (alloc_bounce_pages(dmat, pages) < pages)
571				error = ENOMEM;
572
573			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
574				if (error == 0)
575					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
576			} else {
577				error = 0;
578			}
579		}
580		bz->map_count++;
581	}
582
583	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
584	    __func__, dmat, dmat->flags, error);
585
586	return (0);
587}
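
/*
 * Usage sketch (illustrative only; the driver names are hypothetical): a map
 * created here is typically paired with a load and eventually destroyed:
 *
 *	error = bus_dmamap_create(sc->buf_tag, 0, &sc->buf_map);
 *	...
 *	error = bus_dmamap_load(sc->buf_tag, sc->buf_map, sc->buf, sc->buflen,
 *	    mydrv_load_cb, sc, BUS_DMA_NOWAIT);
 *	...
 *	bus_dmamap_unload(sc->buf_tag, sc->buf_map);
 *	bus_dmamap_destroy(sc->buf_tag, sc->buf_map);
 *
 * With BUS_DMA_NOWAIT a load that cannot reserve bounce pages fails with
 * ENOMEM; without it the load may return EINPROGRESS and invoke the
 * callback later from busdma_swi().
 */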
588
589/*
590 * Destroy a handle for mapping from kva/uva/physical
591 * address space into bus device space.
592 */
593int
594bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
595{
596
597	if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
598		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
599		    __func__, dmat, EBUSY);
600		return (EBUSY);
601	}
602	if (dmat->bounce_zone)
603		dmat->bounce_zone->map_count--;
604	_busdma_free_dmamap(map);
605	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
606	return (0);
607}
608
609/*
610 * Allocate a piece of memory that can be efficiently mapped into
611 * bus device space based on the constraints listed in the dma tag.
612 * A dmamap for use with bus_dmamap_load is also allocated.
613 */
614int
615bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddrp, int flags,
616    bus_dmamap_t *mapp)
617{
618	bus_dmamap_t newmap = NULL;
619	busdma_bufalloc_t ba;
620	struct busdma_bufzone *bufzone;
621	vm_memattr_t memattr;
622	void *vaddr;
623
624	int mflags;
625
626	if (flags & BUS_DMA_NOWAIT)
627		mflags = M_NOWAIT;
628	else
629		mflags = M_WAITOK;
630	if (dmat->segments == NULL) {
631		dmat->segments = (bus_dma_segment_t *)malloc(
632		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_BUSDMA,
633		    mflags);
634		if (dmat->segments == NULL) {
635			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
636			    __func__, dmat, dmat->flags, ENOMEM);
637			return (ENOMEM);
638		}
639	}
640
641	newmap = _busdma_alloc_dmamap(dmat);
642	if (newmap == NULL) {
643		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
644		    __func__, dmat, dmat->flags, ENOMEM);
645		return (ENOMEM);
646	}
647
648	/*
649	 * If all the memory is coherent with DMA then we don't need to
650	 * do anything special for a coherent mapping request.
651	 */
652	if (dmat->flags & BUS_DMA_COHERENT)
653	    flags &= ~BUS_DMA_COHERENT;
654
655	if (flags & BUS_DMA_COHERENT) {
656		memattr = VM_MEMATTR_UNCACHEABLE;
657		ba = coherent_allocator;
658		newmap->flags |= DMAMAP_UNCACHEABLE;
659	} else {
660		memattr = VM_MEMATTR_DEFAULT;
661		ba = standard_allocator;
662	}
663	/* All buffers we allocate are cache-aligned. */
664	newmap->flags |= DMAMAP_CACHE_ALIGNED;
665
666	if (flags & BUS_DMA_ZERO)
667		mflags |= M_ZERO;
668
669	/*
670	 * Try to find a bufzone in the allocator that holds a cache of buffers
671	 * of the right size for this request.  If the buffer is too big to be
672	 * held in the allocator cache, this returns NULL.
673	 */
674	bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);
675
676	/*
677	 * Allocate the buffer from the uma(9) allocator if...
678	 *  - It's small enough to be in the allocator (bufzone not NULL).
679	 *  - The alignment constraint isn't larger than the allocation size
680	 *    (the allocator aligns buffers to their size boundaries).
681	 *  - There's no need to handle lowaddr/highaddr exclusion zones.
682	 * else allocate non-contiguous pages if...
683	 *  - The page count that could get allocated doesn't exceed nsegments.
684	 *  - The alignment constraint isn't larger than a page boundary.
685	 *  - There are no boundary-crossing constraints.
686	 * else allocate a block of contiguous pages because one or more of the
687	 * constraints is something that only the contig allocator can fulfill.
688	 */
689	if (bufzone != NULL && dmat->alignment <= bufzone->size &&
690	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) {
691		vaddr = uma_zalloc(bufzone->umazone, mflags);
692	} else if (dmat->nsegments >= btoc(dmat->maxsize) &&
693	    dmat->alignment <= PAGE_SIZE && dmat->boundary == 0) {
694		vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
695		    mflags, 0, dmat->lowaddr, memattr);
696	} else {
697		vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize,
698		    mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,
699		    memattr);
700	}
701	if (vaddr == NULL) {
702		_busdma_free_dmamap(newmap);
703		newmap = NULL;
704	} else {
705		newmap->sync_count = 0;
706	}
707	*vaddrp = vaddr;
708	*mapp = newmap;
709
710	return (vaddr == NULL ? ENOMEM : 0);
711}
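
/*
 * Usage sketch (illustrative only; the softc fields are hypothetical and
 * sc->ring is a void * here): memory obtained from this routine is loaded
 * with the map returned alongside it and released with bus_dmamem_free():
 *
 *	error = bus_dmamem_alloc(sc->ring_tag, &sc->ring,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->ring_map);
 *	if (error == 0)
 *		error = bus_dmamap_load(sc->ring_tag, sc->ring_map, sc->ring,
 *		    sc->ring_size, mydrv_ring_cb, sc, BUS_DMA_NOWAIT);
 *	...
 *	bus_dmamap_unload(sc->ring_tag, sc->ring_map);
 *	bus_dmamem_free(sc->ring_tag, sc->ring, sc->ring_map);
 *
 * On a platform with coherent DMA the BUS_DMA_COHERENT request is simply
 * dropped above and the buffer comes from the standard (cacheable) pool.
 */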
712
713/*
714 * Free a piece of memory and its allocated dmamap that were allocated
715 * via bus_dmamem_alloc.  The free here must mirror the choice made at allocation.
716 */
717void
718bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
719{
720	struct busdma_bufzone *bufzone;
721	busdma_bufalloc_t ba;
722
723	if (map->flags & DMAMAP_UNCACHEABLE)
724		ba = coherent_allocator;
725	else
726		ba = standard_allocator;
727
728	free(map->slist, M_BUSDMA);
729	uma_zfree(dmamap_zone, map);
730
731	bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);
732
733	if (bufzone != NULL && dmat->alignment <= bufzone->size &&
734	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
735		uma_zfree(bufzone->umazone, vaddr);
736	else
737		kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize);
738	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
739}
740
741static void
742_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
743    bus_size_t buflen, int flags)
744{
745	bus_addr_t curaddr;
746	bus_size_t sgsize;
747
748	if ((map->pagesneeded == 0)) {
749		CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
750		    dmat->lowaddr, dmat->boundary, dmat->alignment);
751		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
752		    map, map->pagesneeded);
753		/*
754		 * Count the number of bounce pages
755		 * needed in order to complete this transfer
756		 */
757		curaddr = buf;
758		while (buflen != 0) {
759			sgsize = MIN(buflen, dmat->maxsegsz);
760			if (run_filter(dmat, curaddr) != 0) {
761				sgsize = MIN(sgsize, PAGE_SIZE);
762				map->pagesneeded++;
763			}
764			curaddr += sgsize;
765			buflen -= sgsize;
766		}
767		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
768	}
769}
770
771static void
772_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
773    void *buf, bus_size_t buflen, int flags)
774{
775	vm_offset_t vaddr;
776	vm_offset_t vendaddr;
777	bus_addr_t paddr;
778
779	if ((map->pagesneeded == 0)) {
780		CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
781		    dmat->lowaddr, dmat->boundary, dmat->alignment);
782		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
783		    map, map->pagesneeded);
784		/*
785		 * Count the number of bounce pages
786		 * needed in order to complete this transfer
787		 */
788		vaddr = (vm_offset_t)buf;
789		vendaddr = (vm_offset_t)buf + buflen;
790
791		while (vaddr < vendaddr) {
792			bus_size_t sg_len;
793
794			KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
795			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
796			paddr = pmap_kextract(vaddr);
797			if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
798			    run_filter(dmat, paddr) != 0) {
799				sg_len = roundup2(sg_len, dmat->alignment);
800				map->pagesneeded++;
801			}
802			vaddr += sg_len;
803		}
804		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
805	}
806}
807
808static int
809_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,int flags)
810{
811
812	/* Reserve Necessary Bounce Pages */
813	mtx_lock(&bounce_lock);
814	if (flags & BUS_DMA_NOWAIT) {
815		if (reserve_bounce_pages(dmat, map, 0) != 0) {
816			mtx_unlock(&bounce_lock);
817			return (ENOMEM);
818		}
819	} else {
820		if (reserve_bounce_pages(dmat, map, 1) != 0) {
821			/* Queue us for resources */
822			STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
823			    map, links);
824			mtx_unlock(&bounce_lock);
825			return (EINPROGRESS);
826		}
827	}
828	mtx_unlock(&bounce_lock);
829
830	return (0);
831}
832
833/*
834 * Add a single contiguous physical range to the segment list.
835 */
836static int
837_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
838    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
839{
840	bus_addr_t baddr, bmask;
841	int seg;
842
843	/*
844	 * Make sure we don't cross any boundaries.
845	 */
846	bmask = ~(dmat->boundary - 1);
847	if (dmat->boundary > 0) {
848		baddr = (curaddr + dmat->boundary) & bmask;
849		if (sgsize > (baddr - curaddr))
850			sgsize = (baddr - curaddr);
851	}
852	/*
853	 * Insert chunk into a segment, coalescing with
854	 * the previous segment if possible.
855	 */
856	seg = *segp;
857	if (seg >= 0 &&
858	    curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
859	    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
860	    (dmat->boundary == 0 ||
861	     (segs[seg].ds_addr & bmask) == (curaddr & bmask))) {
862		segs[seg].ds_len += sgsize;
863	} else {
864		if (++seg >= dmat->nsegments)
865			return (0);
866		segs[seg].ds_addr = curaddr;
867		segs[seg].ds_len = sgsize;
868	}
869	*segp = seg;
870	return (sgsize);
871}
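
/*
 * Worked example (illustrative, not from the original source): with
 * boundary = 0x1000 and maxsegsz = 0x10000, a chunk at curaddr = 0x1f80
 * with sgsize = 0x200 would cross the 0x2000 line, so it is clipped to
 * 0x80 bytes here; the caller then presents the remaining 0x180 bytes at
 * 0x2000 as a new chunk, which starts a new segment because the boundary
 * test above refuses to coalesce across the 0x2000 line.
 */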
872
873/*
874 * Utility function to load a physical buffer.  segp contains
875 * the starting segment on entrance, and the ending segment on exit.
876 */
877int
878_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
879    vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
880    int *segp)
881{
882	bus_addr_t curaddr;
883	bus_size_t sgsize;
884	int error;
885
886	if (segs == NULL)
887		segs = dmat->segments;
888
889	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
890		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
891		if (map->pagesneeded != 0) {
892			error = _bus_dmamap_reserve_pages(dmat, map, flags);
893			if (error)
894				return (error);
895		}
896	}
897
898	while (buflen > 0) {
899		curaddr = buf;
900		sgsize = MIN(buflen, dmat->maxsegsz);
901		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
902		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
903			sgsize = MIN(sgsize, PAGE_SIZE);
904			curaddr = add_bounce_page(dmat, map, 0, curaddr,
905			    sgsize);
906		}
907		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
908		    segp);
909		if (sgsize == 0)
910			break;
911		buf += sgsize;
912		buflen -= sgsize;
913	}
914
915	/*
916	 * Did we fit?
917	 */
918	if (buflen != 0) {
919		_bus_dmamap_unload(dmat, map);
920		return (EFBIG); /* XXX better return value here? */
921	}
922	return (0);
923}
924
925int
926_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
927    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
928    bus_dma_segment_t *segs, int *segp)
929{
930
931	return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
932	    segs, segp));
933}
934
935/*
936 * Utility function to load a linear buffer.  segp contains
937 * the starting segment on entrance, and the ending segment on exit.
939 */
940int
941_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
942    bus_size_t buflen, struct pmap *pmap, int flags, bus_dma_segment_t *segs,
943    int *segp)
944{
945	bus_size_t sgsize;
946	bus_addr_t curaddr;
947	struct sync_list *sl;
948	vm_offset_t vaddr = (vm_offset_t)buf;
949	int error = 0;
950
951
952	if (segs == NULL)
953		segs = dmat->segments;
954	if ((flags & BUS_DMA_LOAD_MBUF) != 0)
955		map->flags |= DMAMAP_CACHE_ALIGNED;
956
957	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
958		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
959		if (map->pagesneeded != 0) {
960			error = _bus_dmamap_reserve_pages(dmat, map, flags);
961			if (error)
962				return (error);
963		}
964	}
965	CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
966	    "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);
967
968	while (buflen > 0) {
969		/*
970		 * Get the physical address for this segment.
971		 *
972		 * XXX Don't support checking for coherent mappings
973		 * XXX in user address space.
974		 */
975		KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
976		curaddr = pmap_kextract(vaddr);
977
978		/*
979		 * Compute the segment size, and adjust counts.
980		 */
981		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
982		if (sgsize > dmat->maxsegsz)
983			sgsize = dmat->maxsegsz;
984		if (buflen < sgsize)
985			sgsize = buflen;
986
987		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
988		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
989			curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
990			    sgsize);
991		} else {
992			sl = &map->slist[map->sync_count - 1];
993			if (map->sync_count == 0 ||
994			    vaddr != sl->vaddr + sl->datacount) {
995				if (++map->sync_count > dmat->nsegments)
996					goto cleanup;
997				sl++;
998				sl->vaddr = vaddr;
999				sl->datacount = sgsize;
1000				sl->busaddr = curaddr;
1001			} else
1002				sl->datacount += sgsize;
1003		}
1004		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
1005		    segp);
1006		if (sgsize == 0)
1007			break;
1008		vaddr += sgsize;
1009		buflen -= sgsize;
1010	}
1011
1012cleanup:
1013	/*
1014	 * Did we fit?
1015	 */
1016	if (buflen != 0) {
1017		_bus_dmamap_unload(dmat, map);
1018		error = EFBIG; /* XXX better return value here? */
1019	}
1020	return (error);
1021}
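
/*
 * Illustrative note (not from the original source): loading a virtually
 * contiguous but physically scattered 6KB kernel buffer typically yields
 * two or three DMA segments yet only a single sync_list entry, because
 * adjacent KVA ranges are merged above; _bus_dmamap_sync() later walks that
 * list to do the cache maintenance for the unbounced portions.
 */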
1022
1023void
1024__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
1025    struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
1026{
1027
1028	KASSERT(dmat != NULL, ("dmatag is NULL"));
1029	KASSERT(map != NULL, ("dmamap is NULL"));
1030	map->mem = *mem;
1031	map->callback = callback;
1032	map->callback_arg = callback_arg;
1033}
1034
1035bus_dma_segment_t *
1036_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
1037    bus_dma_segment_t *segs, int nsegs, int error)
1038{
1039
1040	if (segs == NULL)
1041		segs = dmat->segments;
1042	return (segs);
1043}
1044
1045/*
1046 * Release the mapping held by map.
1047 */
1048void
1049_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
1050{
1051	struct bounce_page *bpage;
1052
1053	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
1054		STAILQ_REMOVE_HEAD(&map->bpages, links);
1055		free_bounce_page(dmat, bpage);
1056	}
1057	map->sync_count = 0;
1058	return;
1059}
1060
1061static void
1062bus_dmamap_sync_buf(vm_offset_t buf, int len, bus_dmasync_op_t op, int aligned)
1063{
1064	char tmp_cl[mips_pdcache_linesize], tmp_clend[mips_pdcache_linesize];
1065	vm_offset_t buf_cl, buf_clend;
1066	vm_size_t size_cl, size_clend;
1067	int cache_linesize_mask = mips_pdcache_linesize - 1;
1068
1069	/*
1070	 * dcache invalidation operates on cache line aligned addresses
1071	 * and could modify areas of memory that share the same cache line
1072	 * at the beginning and the ending of the buffer. In order to
1073 * prevent data loss we save these chunks in a temporary buffer
1074 * before the invalidation and restore them after it.
1075	 *
1076	 * If the aligned flag is set the buffer is either an mbuf or came from
1077	 * our allocator caches.  In both cases they are always sized and
1078	 * aligned to cacheline boundaries, so we can skip preserving nearby
1079	 * data if a transfer appears to overlap cachelines.  An mbuf in
1080	 * particular will usually appear to be overlapped because of offsetting
1081	 * within the buffer to align the L3 headers, but we know that the bytes
1082 * preceding that offset are part of the same mbuf memory and are not
1083	 * unrelated adjacent data (and a rule of mbuf handling is that the cpu
1084	 * is not allowed to touch the mbuf while dma is in progress, including
1085	 * header fields).
1086	 */
1087	if (aligned) {
1088		size_cl = 0;
1089		size_clend = 0;
1090	} else {
1091		buf_cl = buf & ~cache_linesize_mask;
1092		size_cl = buf & cache_linesize_mask;
1093		buf_clend = buf + len;
1094		size_clend = (mips_pdcache_linesize -
1095		    (buf_clend & cache_linesize_mask)) & cache_linesize_mask;
1096	}
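
	/*
	 * Worked example (illustrative): with 32-byte cache lines, a buffer
	 * at buf = 0x80000410 with len = 0x30 gives buf_cl = 0x80000400 and
	 * size_cl = 0x10 (the partial line preceding the buffer), while
	 * buf + len = 0x80000440 is line aligned so size_clend = 0.  Only
	 * those 0x10 bytes need to be saved and restored around an
	 * invalidation.
	 */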
1097
1098	switch (op) {
1099	case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
1100	case BUS_DMASYNC_POSTREAD:
1101
1102		/*
1103		 * Save buffers that might be modified by invalidation
1104		 */
1105		if (size_cl)
1106			memcpy (tmp_cl, (void*)buf_cl, size_cl);
1107		if (size_clend)
1108			memcpy (tmp_clend, (void*)buf_clend, size_clend);
1109		mips_dcache_inv_range(buf, len);
1110		/*
1111		 * Restore them
1112		 */
1113		if (size_cl)
1114			memcpy ((void*)buf_cl, tmp_cl, size_cl);
1115		if (size_clend)
1116			memcpy ((void*)buf_clend, tmp_clend, size_clend);
1117		/*
1118		 * Copies above have brought corresponding memory
1119		 * cache lines back into dirty state. Write them back
1120		 * out and invalidate affected cache lines again if
1121		 * necessary.
1122		 */
1123		if (size_cl)
1124			mips_dcache_wbinv_range(buf_cl, size_cl);
1125		if (size_clend && (size_cl == 0 ||
1126		    buf_clend - buf_cl > mips_pdcache_linesize))
1127			mips_dcache_wbinv_range(buf_clend, size_clend);
1128		break;
1129
1130	case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
1131		mips_dcache_wbinv_range(buf, len);
1132		break;
1133
1134	case BUS_DMASYNC_PREREAD:
1135		/*
1136		 * Save buffers that might be modified by invalidation
1137		 */
1138		if (size_cl)
1139			memcpy (tmp_cl, (void *)buf_cl, size_cl);
1140		if (size_clend)
1141			memcpy (tmp_clend, (void *)buf_clend, size_clend);
1142		mips_dcache_inv_range(buf, len);
1143		/*
1144		 * Restore them
1145		 */
1146		if (size_cl)
1147			memcpy ((void *)buf_cl, tmp_cl, size_cl);
1148		if (size_clend)
1149			memcpy ((void *)buf_clend, tmp_clend, size_clend);
1150		/*
1151		 * Copies above have brought corresponding memory
1152		 * cache lines back into dirty state. Write them back
1153		 * out and invalidate affected cache lines again if
1154		 * necessary.
1155		 */
1156		if (size_cl)
1157			mips_dcache_wbinv_range(buf_cl, size_cl);
1158		if (size_clend && (size_cl == 0 ||
1159		    buf_clend - buf_cl > mips_pdcache_linesize))
1160			mips_dcache_wbinv_range(buf_clend, size_clend);
1161		break;
1162
1163	case BUS_DMASYNC_PREWRITE:
1164		mips_dcache_wb_range(buf, len);
1165		break;
1166	}
1167}
1168
1169static void
1170_bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
1171{
1172	struct bounce_page *bpage;
1173
1174	STAILQ_FOREACH(bpage, &map->bpages, links) {
1175		if (op & BUS_DMASYNC_PREWRITE) {
1176			if (bpage->datavaddr != 0)
1177				bcopy((void *)bpage->datavaddr,
1178				    (void *)(bpage->vaddr_nocache != 0 ?
1179					     bpage->vaddr_nocache :
1180					     bpage->vaddr),
1181				    bpage->datacount);
1182			else
1183				physcopyout(bpage->dataaddr,
1184				    (void *)(bpage->vaddr_nocache != 0 ?
1185					     bpage->vaddr_nocache :
1186					     bpage->vaddr),
1187				    bpage->datacount);
1188			if (bpage->vaddr_nocache == 0) {
1189				mips_dcache_wb_range(bpage->vaddr,
1190				    bpage->datacount);
1191			}
1192			dmat->bounce_zone->total_bounced++;
1193		}
1194		if (op & BUS_DMASYNC_POSTREAD) {
1195			if (bpage->vaddr_nocache == 0) {
1196				mips_dcache_inv_range(bpage->vaddr,
1197				    bpage->datacount);
1198			}
1199			if (bpage->datavaddr != 0)
1200				bcopy((void *)(bpage->vaddr_nocache != 0 ?
1201				    bpage->vaddr_nocache : bpage->vaddr),
1202				    (void *)bpage->datavaddr, bpage->datacount);
1203			else
1204				physcopyin((void *)(bpage->vaddr_nocache != 0 ?
1205				    bpage->vaddr_nocache : bpage->vaddr),
1206				    bpage->dataaddr, bpage->datacount);
1207			dmat->bounce_zone->total_bounced++;
1208		}
1209	}
1210}
1211
1212void
1213_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
1214{
1215	struct sync_list *sl, *end;
1216	int aligned;
1217
1218	if (op == BUS_DMASYNC_POSTWRITE)
1219		return;
1220	if (STAILQ_FIRST(&map->bpages))
1221		_bus_dmamap_sync_bp(dmat, map, op);
1222
1223	if ((dmat->flags & BUS_DMA_COHERENT) ||
1224	    (map->flags & DMAMAP_UNCACHEABLE)) {
1225		if (op & BUS_DMASYNC_PREWRITE)
1226			mips_sync();
1227		return;
1228	}
1229
1230	aligned = (map->flags & DMAMAP_CACHE_ALIGNED) ? 1 : 0;
1231
1232	CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
1233	if (map->sync_count) {
1234		end = &map->slist[map->sync_count];
1235		for (sl = &map->slist[0]; sl != end; sl++)
1236			bus_dmamap_sync_buf(sl->vaddr, sl->datacount, op,
1237			    aligned);
1238	}
1239}
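
/*
 * Sketch of the expected call order around a memory-to-device transfer
 * (illustrative only; names are hypothetical):
 *
 *	bus_dmamap_sync(sc->buf_tag, sc->buf_map, BUS_DMASYNC_PREWRITE);
 *	// start the device, wait for the transfer to complete
 *	bus_dmamap_sync(sc->buf_tag, sc->buf_map, BUS_DMASYNC_POSTWRITE);
 *
 * Device-to-memory transfers use the PREREAD/POSTREAD pair instead.  As the
 * code above shows, POSTWRITE alone is a no-op here, while POSTREAD does the
 * invalidation and copies bounced data back to the client buffer.
 */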
1240
1241static void
1242init_bounce_pages(void *dummy __unused)
1243{
1244
1245	total_bpages = 0;
1246	STAILQ_INIT(&bounce_zone_list);
1247	STAILQ_INIT(&bounce_map_waitinglist);
1248	STAILQ_INIT(&bounce_map_callbacklist);
1249	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
1250}
1251SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);
1252
1253static struct sysctl_ctx_list *
1254busdma_sysctl_tree(struct bounce_zone *bz)
1255{
1256	return (&bz->sysctl_tree);
1257}
1258
1259static struct sysctl_oid *
1260busdma_sysctl_tree_top(struct bounce_zone *bz)
1261{
1262	return (bz->sysctl_tree_top);
1263}
1264
1265static int
1266alloc_bounce_zone(bus_dma_tag_t dmat)
1267{
1268	struct bounce_zone *bz;
1269
1270	/* Check to see if we already have a suitable zone */
1271	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
1272		if ((dmat->alignment <= bz->alignment)
1273		 && (dmat->lowaddr >= bz->lowaddr)) {
1274			dmat->bounce_zone = bz;
1275			return (0);
1276		}
1277	}
1278
1279	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_BUSDMA,
1280	    M_NOWAIT | M_ZERO)) == NULL)
1281		return (ENOMEM);
1282
1283	STAILQ_INIT(&bz->bounce_page_list);
1284	bz->free_bpages = 0;
1285	bz->reserved_bpages = 0;
1286	bz->active_bpages = 0;
1287	bz->lowaddr = dmat->lowaddr;
1288	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
1289	bz->map_count = 0;
1290	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
1291	busdma_zonecount++;
1292	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
1293	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
1294	dmat->bounce_zone = bz;
1295
1296	sysctl_ctx_init(&bz->sysctl_tree);
1297	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
1298	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
1299	    CTLFLAG_RD, 0, "");
1300	if (bz->sysctl_tree_top == NULL) {
1301		sysctl_ctx_free(&bz->sysctl_tree);
1302		return (0);	/* XXX error code? */
1303	}
1304
1305	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1306	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1307	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
1308	    "Total bounce pages");
1309	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1310	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1311	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
1312	    "Free bounce pages");
1313	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1314	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1315	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
1316	    "Reserved bounce pages");
1317	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1318	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1319	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
1320	    "Active bounce pages");
1321	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1322	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1323	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
1324	    "Total bounce requests");
1325	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1326	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1327	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
1328	    "Total bounce requests that were deferred");
1329	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
1330	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1331	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
1332	SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz),
1333	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1334	    "alignment", CTLFLAG_RD, &bz->alignment, "");
1335
1336	return (0);
1337}
1338
1339static int
1340alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
1341{
1342	struct bounce_zone *bz;
1343	int count;
1344
1345	bz = dmat->bounce_zone;
1346	count = 0;
1347	while (numpages > 0) {
1348		struct bounce_page *bpage;
1349
1350		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_BUSDMA,
1351						     M_NOWAIT | M_ZERO);
1352
1353		if (bpage == NULL)
1354			break;
1355		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_BOUNCE,
1356							 M_NOWAIT, 0ul,
1357							 bz->lowaddr,
1358							 PAGE_SIZE,
1359							 0);
1360		if (bpage->vaddr == 0) {
1361			free(bpage, M_BUSDMA);
1362			break;
1363		}
1364		bpage->busaddr = pmap_kextract(bpage->vaddr);
1365		bpage->vaddr_nocache =
1366		    (vm_offset_t)pmap_mapdev(bpage->busaddr, PAGE_SIZE);
1367		mtx_lock(&bounce_lock);
1368		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
1369		total_bpages++;
1370		bz->total_bpages++;
1371		bz->free_bpages++;
1372		mtx_unlock(&bounce_lock);
1373		count++;
1374		numpages--;
1375	}
1376	return (count);
1377}
1378
1379static int
1380reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
1381{
1382	struct bounce_zone *bz;
1383	int pages;
1384
1385	mtx_assert(&bounce_lock, MA_OWNED);
1386	bz = dmat->bounce_zone;
1387	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
1388	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
1389		return (map->pagesneeded - (map->pagesreserved + pages));
1390	bz->free_bpages -= pages;
1391	bz->reserved_bpages += pages;
1392	map->pagesreserved += pages;
1393	pages = map->pagesneeded - map->pagesreserved;
1394
1395	return (pages);
1396}
1397
1398static bus_addr_t
1399add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
1400		bus_addr_t addr, bus_size_t size)
1401{
1402	struct bounce_zone *bz;
1403	struct bounce_page *bpage;
1404
1405	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
1406	KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));
1407
1408	bz = dmat->bounce_zone;
1409	if (map->pagesneeded == 0)
1410		panic("add_bounce_page: map doesn't need any pages");
1411	map->pagesneeded--;
1412
1413	if (map->pagesreserved == 0)
1414		panic("add_bounce_page: map doesn't need any pages");
1415	map->pagesreserved--;
1416
1417	mtx_lock(&bounce_lock);
1418	bpage = STAILQ_FIRST(&bz->bounce_page_list);
1419	if (bpage == NULL)
1420		panic("add_bounce_page: free page list is empty");
1421
1422	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
1423	bz->reserved_bpages--;
1424	bz->active_bpages++;
1425	mtx_unlock(&bounce_lock);
1426
1427	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
1428		/* Page offset needs to be preserved. */
1429		bpage->vaddr |= addr & PAGE_MASK;
1430		bpage->busaddr |= addr & PAGE_MASK;
1431	}
1432	bpage->datavaddr = vaddr;
1433	bpage->dataaddr = addr;
1434	bpage->datacount = size;
1435	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
1436	return (bpage->busaddr);
1437}
1438
1439static void
1440free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
1441{
1442	struct bus_dmamap *map;
1443	struct bounce_zone *bz;
1444
1445	bz = dmat->bounce_zone;
1446	bpage->datavaddr = 0;
1447	bpage->datacount = 0;
1448	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
1449		/*
1450		 * Reset the bounce page to start at offset 0.  Other uses
1451		 * of this bounce page may need to store a full page of
1452		 * data and/or assume it starts on a page boundary.
1453		 */
1454		bpage->vaddr &= ~PAGE_MASK;
1455		bpage->busaddr &= ~PAGE_MASK;
1456	}
1457
1458	mtx_lock(&bounce_lock);
1459	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
1460	bz->free_bpages++;
1461	bz->active_bpages--;
1462	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
1463		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
1464			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
1465			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
1466					   map, links);
1467			busdma_swi_pending = 1;
1468			bz->total_deferred++;
1469			swi_sched(vm_ih, 0);
1470		}
1471	}
1472	mtx_unlock(&bounce_lock);
1473}
1474
1475void
1476busdma_swi(void)
1477{
1478	bus_dma_tag_t dmat;
1479	struct bus_dmamap *map;
1480
1481	mtx_lock(&bounce_lock);
1482	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
1483		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
1484		mtx_unlock(&bounce_lock);
1485		dmat = map->dmat;
1486		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
1487		bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
1488		    map->callback_arg, BUS_DMA_WAITOK);
1489		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
1490		mtx_lock(&bounce_lock);
1491	}
1492	mtx_unlock(&bounce_lock);
1493}
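
/*
 * Illustrative note (not from the original source): this is the tail of the
 * deferred-load path.  A load issued without BUS_DMA_NOWAIT that cannot
 * reserve enough bounce pages returns EINPROGRESS and queues its map; once
 * free_bounce_page() can satisfy the reservation it schedules this software
 * interrupt, which retries the load and invokes the driver callback under
 * the tag's lock function.
 */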
1494