/*-
 * Copyright (c) 2006 Oleksandr Tymoshenko
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *  From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/mips/mips/busdma_machdep.c 318976 2017-05-27 07:47:52Z hselasky $");

/*
 * MIPS bus dma support routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/busdma_bufalloc.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/uio.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cache.h>
#include <machine/cpufunc.h>
#include <machine/cpuinfo.h>
#include <machine/md_var.h>

#define MAX_BPAGES 64
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t		parent;
	bus_size_t		alignment;
	bus_addr_t		boundary;
	bus_addr_t		lowaddr;
	bus_addr_t		highaddr;
	bus_dma_filter_t	*filter;
	void			*filterarg;
	bus_size_t		maxsize;
	u_int			nsegments;
	bus_size_t		maxsegsz;
	int			flags;
	int			ref_count;
	int			map_count;
	bus_dma_lock_t		*lockfunc;
	void			*lockfuncarg;
	bus_dma_segment_t	*segments;
	struct bounce_zone *bounce_zone;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	vm_offset_t	vaddr_nocache;	/* kva of bounce buffer uncached */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_addr_t	dataaddr;	/* client physical address */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

struct sync_list {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	bus_size_t	datacount;	/* client data count */
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		map_count;
	bus_size_t	alignment;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
	   "Total bounce pages");

#define DMAMAP_UNCACHEABLE	0x08
#define DMAMAP_CACHE_ALIGNED	0x10

struct bus_dmamap {
	struct bp_list	bpages;
	int		pagesneeded;
	int		pagesreserved;
	bus_dma_tag_t	dmat;
	struct memdesc	mem;
	int		flags;
	void		*origbuffer;
	void		*allocbuffer;
	TAILQ_ENTRY(bus_dmamap)	freelist;
	STAILQ_ENTRY(bus_dmamap) links;
	bus_dmamap_callback_t *callback;
	void		*callback_arg;
	int		sync_count;
	struct sync_list *slist;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				  vm_offset_t vaddr, bus_addr_t addr,
				  bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);

/* Default tag, as most drivers provide no parent tag. */
bus_dma_tag_t mips_root_dma_tag;

static uma_zone_t dmamap_zone;	/* Cache of struct bus_dmamap items */

static busdma_bufalloc_t coherent_allocator;	/* Cache of coherent buffers */
static busdma_bufalloc_t standard_allocator;	/* Cache of standard buffers */

MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata");
MALLOC_DEFINE(M_BOUNCE, "bounce", "busdma bounce pages");

/*
 * This is the ctor function passed to uma_zcreate() for the pool of dma maps.
 * It'll need platform-specific changes if this code is copied.
 */
static int
dmamap_ctor(void *mem, int size, void *arg, int flags)
{
	bus_dmamap_t map;
	bus_dma_tag_t dmat;

	map = (bus_dmamap_t)mem;
	dmat = (bus_dma_tag_t)arg;

	dmat->map_count++;

	map->dmat = dmat;
	map->flags = 0;
	map->slist = NULL;
	map->allocbuffer = NULL;
	map->sync_count = 0;
	STAILQ_INIT(&map->bpages);

	return (0);
}

/*
 * This is the dtor function passed to uma_zcreate() for the pool of dma maps.
 * It may need platform-specific changes if this code is copied.
 */
static void
dmamap_dtor(void *mem, int size, void *arg)
{
	bus_dmamap_t map;

	map = (bus_dmamap_t)mem;

	map->dmat->map_count--;
}

static void
busdma_init(void *dummy)
{

	/* Create a cache of maps for bus_dmamap_create(). */
	dmamap_zone = uma_zcreate("dma maps", sizeof(struct bus_dmamap),
	    dmamap_ctor, dmamap_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);

	/* Create a cache of buffers in standard (cacheable) memory. */
	standard_allocator = busdma_bufalloc_create("buffer",
	    mips_pdcache_linesize,	/* minimum_alignment */
	    NULL,			/* uma_alloc func */
	    NULL,			/* uma_free func */
	    0);				/* uma_zcreate_flags */

	/*
	 * Create a cache of buffers in uncacheable memory, to implement the
	 * BUS_DMA_COHERENT flag.
	 */
	coherent_allocator = busdma_bufalloc_create("coherent",
	    mips_pdcache_linesize,	/* minimum_alignment */
	    busdma_bufalloc_alloc_uncacheable,
	    busdma_bufalloc_free_uncacheable,
	    0);				/* uma_zcreate_flags */
}
SYSINIT(busdma, SI_SUB_KMEM, SI_ORDER_FOURTH, busdma_init, NULL);

/*
 * Return true if a match is made.
 *
 * To find a match, walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the DMA tag, call the filter callback
 * to check for a match; if there is no filter callback, assume a match.
 */
static int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		 || ((paddr & (dmat->alignment - 1)) != 0))
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}
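
/*
 * Worked example (illustrative, not a case taken from this file): for a
 * tag with lowaddr == 0x00ffffff, highaddr == BUS_SPACE_MAXADDR,
 * alignment == 1 and no filter callback, a physical address of
 * 0x01000000 satisfies (paddr > lowaddr && paddr <= highaddr), so
 * run_filter() returns 1 and that page must be bounced.
 */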

/*
 * Check to see if the specified page is in an allowed DMA range.
 */

static __inline int
_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
{
	int i;
	for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
		if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1])
		    || (lowaddr < phys_avail[i] &&
		    highaddr > phys_avail[i]))
			return (1);
	}
	return (0);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
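
/*
 * Example (illustrative sketch): a driver whose map loads may be deferred
 * would typically pass this helper and its own mutex as the
 * lockfunc/lockfuncarg pair in bus_dma_tag_create(), e.g.
 * "busdma_lock_mutex, &sc->sc_mtx" (sc_mtx being a hypothetical driver
 * softc mutex), so that busdma_swi() can take the driver lock around the
 * deferred load callback.
 */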

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
	panic("driver error: busdma dflt_lock called");
#else
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}

static __inline bus_dmamap_t
_busdma_alloc_dmamap(bus_dma_tag_t dmat)
{
	struct sync_list *slist;
	bus_dmamap_t map;

	slist = malloc(sizeof(*slist) * dmat->nsegments, M_BUSDMA, M_NOWAIT);
	if (slist == NULL)
		return (NULL);
	map = uma_zalloc_arg(dmamap_zone, dmat, M_NOWAIT);
	if (map != NULL)
		map->slist = slist;
	else
		free(slist, M_BUSDMA);
	return (map);
}

static __inline void
_busdma_free_dmamap(bus_dmamap_t map)
{

	free(map->slist, M_BUSDMA);
	uma_zfree(dmamap_zone, map);
}

/*
 * Allocate a device specific dma_tag.
 */
#define SEG_NB 1024

int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_addr_t boundary, bus_addr_t lowaddr,
    bus_addr_t highaddr, bus_dma_filter_t *filter,
    void *filterarg, bus_size_t maxsize, int nsegments,
    bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;
	/* Return a NULL tag on failure */
	*dmat = NULL;
	if (!parent)
		parent = mips_root_dma_tag;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_BUSDMA, M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	if (cpuinfo.cache_coherent_dma)
		newtag->flags |= BUS_DMA_COHERENT;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	newtag->segments = NULL;

	/*
	 * Take into account any restrictions imposed by our parent tag
	 */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary =
			    MIN(parent->boundary, newtag->boundary);
		if ((newtag->filter != NULL) ||
		    ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
			newtag->flags |= BUS_DMA_COULD_BOUNCE;
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}
	if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr)
	 || newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_BUSDMA);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	} else
		newtag->bounce_zone = NULL;
	if (error != 0)
		free(newtag, M_BUSDMA);
	else
		*dmat = newtag;
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);

	return (error);
}
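
/*
 * Example (illustrative sketch only; "dev", "sc" and "buf_tag" are
 * hypothetical driver names): a typical consumer creates a tag for a
 * single 64KB, 4-byte aligned buffer below 4GB with something like
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    65536, 1, 65536, 0, NULL, NULL, &sc->buf_tag);
 *
 * where the arguments are parent, alignment, boundary, lowaddr, highaddr,
 * filter, filterarg, maxsize, nsegments, maxsegsz, flags, lockfunc,
 * lockfuncarg and the tag pointer, in that order.  Passing a NULL
 * lockfunc is only valid if loads on this tag are never deferred (see
 * dflt_lock() above).
 */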

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
#ifdef KTR
	bus_dma_tag_t dmat_copy = dmat;
#endif

	if (dmat != NULL) {
		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL)
					free(dmat->segments, M_BUSDMA);
				free(dmat, M_BUSDMA);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy);

	return (0);
}

#include <sys/kdb.h>
/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap;
	int error = 0;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_BUSDMA,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}
	}

	newmap = _busdma_alloc_dmamap(dmat);
	if (newmap == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
		return (ENOMEM);
	}
	*mapp = newmap;

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0) {
				_busdma_free_dmamap(newmap);
				*mapp = NULL;
				return (error);
			}
		}
		bz = dmat->bounce_zone;

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MAX_BPAGES;
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
		bz->map_count++;
	}

	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);

	return (0);
}
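
/*
 * Example (illustrative; the names are hypothetical): once a tag exists,
 * a driver obtains a map for it with something like
 * "error = bus_dmamap_create(sc->buf_tag, 0, &sc->buf_map);" before
 * loading buffers into it.
 */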

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
		    __func__, dmat, EBUSY);
		return (EBUSY);
	}
	if (dmat->bounce_zone)
		dmat->bounce_zone->map_count--;
	_busdma_free_dmamap(map);
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddrp, int flags,
    bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap = NULL;
	busdma_bufalloc_t ba;
	struct busdma_bufzone *bufzone;
	vm_memattr_t memattr;
	void *vaddr;

	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_BUSDMA,
		    mflags);
		if (dmat->segments == NULL) {
			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
			    __func__, dmat, dmat->flags, ENOMEM);
			return (ENOMEM);
		}
	}

	newmap = _busdma_alloc_dmamap(dmat);
	if (newmap == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	}

	/*
	 * If all the memory is coherent with DMA then we don't need to
	 * do anything special for a coherent mapping request.
	 */
	if (dmat->flags & BUS_DMA_COHERENT)
		flags &= ~BUS_DMA_COHERENT;

	if (flags & BUS_DMA_COHERENT) {
		memattr = VM_MEMATTR_UNCACHEABLE;
		ba = coherent_allocator;
		newmap->flags |= DMAMAP_UNCACHEABLE;
	} else {
		memattr = VM_MEMATTR_DEFAULT;
		ba = standard_allocator;
	}
	/* All buffers we allocate are cache-aligned. */
	newmap->flags |= DMAMAP_CACHE_ALIGNED;

	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	/*
	 * Try to find a bufzone in the allocator that holds a cache of buffers
	 * of the right size for this request.  If the buffer is too big to be
	 * held in the allocator cache, this returns NULL.
	 */
	bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);

	/*
	 * Allocate the buffer from the uma(9) allocator if...
	 *  - It's small enough to be in the allocator (bufzone not NULL).
	 *  - The alignment constraint isn't larger than the allocation size
	 *    (the allocator aligns buffers to their size boundaries).
	 *  - There's no need to handle lowaddr/highaddr exclusion zones.
	 * else allocate non-contiguous pages if...
	 *  - The page count that could get allocated doesn't exceed
	 *    nsegments, even when the maximum segment size is less
	 *    than PAGE_SIZE.
	 *  - The alignment constraint isn't larger than a page boundary.
	 *  - There are no boundary-crossing constraints.
	 * else allocate a block of contiguous pages because one or more of the
	 * constraints is something that only the contig allocator can fulfill.
	 */
	if (bufzone != NULL && dmat->alignment <= bufzone->size &&
	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) {
		vaddr = uma_zalloc(bufzone->umazone, mflags);
	} else if (dmat->nsegments >=
	    howmany(dmat->maxsize, MIN(dmat->maxsegsz, PAGE_SIZE)) &&
	    dmat->alignment <= PAGE_SIZE &&
	    (dmat->boundary % PAGE_SIZE) == 0) {
		vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize,
		    mflags, 0, dmat->lowaddr, memattr);
	} else {
		vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize,
		    mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary,
		    memattr);
	}
	if (vaddr == NULL) {
		_busdma_free_dmamap(newmap);
		newmap = NULL;
	} else {
		newmap->sync_count = 0;
	}
	*vaddrp = vaddr;
	*mapp = newmap;

	return (vaddr == NULL ? ENOMEM : 0);
}
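
/*
 * Example (illustrative; the "sc->ring_*" names are hypothetical): a
 * driver allocating a coherent, zeroed descriptor ring would do
 *
 *	error = bus_dmamem_alloc(sc->ring_tag, &sc->ring_va,
 *	    BUS_DMA_COHERENT | BUS_DMA_ZERO | BUS_DMA_WAITOK,
 *	    &sc->ring_map);
 *
 * and then bus_dmamap_load() the returned buffer to learn its bus
 * address.  On a cache-coherent CPU the BUS_DMA_COHERENT request is
 * satisfied from ordinary cacheable memory, as handled above.
 */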

/*
 * Free a piece of memory and its allocated dmamap, which were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	struct busdma_bufzone *bufzone;
	busdma_bufalloc_t ba;

	if (map->flags & DMAMAP_UNCACHEABLE)
		ba = coherent_allocator;
	else
		ba = standard_allocator;

	free(map->slist, M_BUSDMA);
	uma_zfree(dmamap_zone, map);

	bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize);

	if (bufzone != NULL && dmat->alignment <= bufzone->size &&
	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
		uma_zfree(bufzone->umazone, vaddr);
	else
		kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

static void
_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;

	if (map->pagesneeded == 0) {
		CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
		    dmat->lowaddr, dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		curaddr = buf;
		while (buflen != 0) {
			sgsize = MIN(buflen, dmat->maxsegsz);
			if (run_filter(dmat, curaddr) != 0) {
				sgsize = MIN(sgsize, PAGE_SIZE);
				map->pagesneeded++;
			}
			curaddr += sgsize;
			buflen -= sgsize;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

static void
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if (map->pagesneeded == 0) {
		CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
		    dmat->lowaddr, dmat->boundary, dmat->alignment);
		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
		    map, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			bus_size_t sg_len;

			KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
			paddr = pmap_kextract(vaddr);
			if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
			    run_filter(dmat, paddr) != 0) {
				sg_len = roundup2(sg_len, dmat->alignment);
				map->pagesneeded++;
			}
			vaddr += sg_len;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}
}

static int
_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags)
{

	/* Reserve Necessary Bounce Pages */
	mtx_lock(&bounce_lock);
	if (flags & BUS_DMA_NOWAIT) {
		if (reserve_bounce_pages(dmat, map, 0) != 0) {
			mtx_unlock(&bounce_lock);
			return (ENOMEM);
		}
	} else {
		if (reserve_bounce_pages(dmat, map, 1) != 0) {
			/* Queue us for resources */
			STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
			    map, links);
			mtx_unlock(&bounce_lock);
			return (EINPROGRESS);
		}
	}
	mtx_unlock(&bounce_lock);

	return (0);
}

/*
 * Add a single contiguous physical range to the segment list.
 */
static int
_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
{
	bus_addr_t baddr, bmask;
	int seg;

	/*
	 * Make sure we don't cross any boundaries.
	 */
	bmask = ~(dmat->boundary - 1);
	if (dmat->boundary > 0) {
		baddr = (curaddr + dmat->boundary) & bmask;
		if (sgsize > (baddr - curaddr))
			sgsize = (baddr - curaddr);
	}
	/*
	 * Insert chunk into a segment, coalescing with
	 * the previous segment if possible.
	 */
	seg = *segp;
	if (seg >= 0 &&
	    curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
	    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
	    (dmat->boundary == 0 ||
	     (segs[seg].ds_addr & bmask) == (curaddr & bmask))) {
		segs[seg].ds_len += sgsize;
	} else {
		if (++seg >= dmat->nsegments)
			return (0);
		segs[seg].ds_addr = curaddr;
		segs[seg].ds_len = sgsize;
	}
	*segp = seg;
	return (sgsize);
}
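
/*
 * Worked example (illustrative): with dmat->boundary == 0x1000 (4KB),
 * curaddr == 0x1f80 and sgsize == 0x200, bmask is ~0xfff, so baddr ==
 * (0x1f80 + 0x1000) & ~0xfff == 0x2000 and sgsize is clipped to
 * baddr - curaddr == 0x80, which keeps the segment from crossing the
 * 4KB boundary; the remaining 0x180 bytes start a new segment on the
 * caller's next loop iteration.
 */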

/*
 * Utility function to load a physical buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
    vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
    int *segp)
{
	bus_addr_t curaddr;
	bus_size_t sgsize;
	int error;

	if (segs == NULL)
		segs = dmat->segments;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}

	while (buflen > 0) {
		curaddr = buf;
		sgsize = MIN(buflen, dmat->maxsegsz);
		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			sgsize = MIN(sgsize, PAGE_SIZE);
			curaddr = add_bounce_page(dmat, map, 0, curaddr,
			    sgsize);
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		buf += sgsize;
		buflen -= sgsize;
	}

	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		_bus_dmamap_unload(dmat, map);
		return (EFBIG); /* XXX better return value here? */
	}
	return (0);
}

int
_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
    bus_dma_segment_t *segs, int *segp)
{

	return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
	    segs, segp));
}

/*
 * Utility function to load a linear buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct pmap *pmap, int flags, bus_dma_segment_t *segs,
    int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr;
	struct sync_list *sl;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int error = 0;

	if (segs == NULL)
		segs = dmat->segments;
	if ((flags & BUS_DMA_LOAD_MBUF) != 0)
		map->flags |= DMAMAP_CACHE_ALIGNED;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
		if (map->pagesneeded != 0) {
			error = _bus_dmamap_reserve_pages(dmat, map, flags);
			if (error)
				return (error);
		}
	}
	CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
	    "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);

	while (buflen > 0) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Don't support checking for coherent mappings
		 * XXX in user address space.
		 */
		KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
		curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (sgsize > dmat->maxsegsz)
			sgsize = dmat->maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;

		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
			curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
			    sgsize);
		} else {
			sl = &map->slist[map->sync_count - 1];
			if (map->sync_count == 0 ||
			    vaddr != sl->vaddr + sl->datacount) {
				if (++map->sync_count > dmat->nsegments)
					goto cleanup;
				sl++;
				sl->vaddr = vaddr;
				sl->datacount = sgsize;
				sl->busaddr = curaddr;
			} else
				sl->datacount += sgsize;
		}
		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
		    segp);
		if (sgsize == 0)
			break;
		vaddr += sgsize;
		buflen -= sgsize;
	}

cleanup:
	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		_bus_dmamap_unload(dmat, map);
		error = EFBIG; /* XXX better return value here? */
	}
	return (error);
}

void
__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
{

	KASSERT(dmat != NULL, ("dmatag is NULL"));
	KASSERT(map != NULL, ("dmamap is NULL"));
	map->mem = *mem;
	map->callback = callback;
	map->callback_arg = callback_arg;
}

bus_dma_segment_t *
_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, int error)
{

	if (segs == NULL)
		segs = dmat->segments;
	return (segs);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
	map->sync_count = 0;
	return;
}

static void
bus_dmamap_sync_buf(vm_offset_t buf, int len, bus_dmasync_op_t op, int aligned)
{
	char tmp_cl[mips_pdcache_linesize], tmp_clend[mips_pdcache_linesize];
	vm_offset_t buf_cl, buf_clend;
	vm_size_t size_cl, size_clend;
	int cache_linesize_mask = mips_pdcache_linesize - 1;

	/*
	 * dcache invalidation operates on cache-line-aligned addresses
	 * and could modify areas of memory that share the same cache line
	 * at the beginning and the end of the buffer.  To prevent data
	 * loss we save these chunks in a temporary buffer before the
	 * invalidation and restore them after it.
	 *
	 * If the aligned flag is set the buffer is either an mbuf or came from
	 * our allocator caches.  In both cases they are always sized and
	 * aligned to cacheline boundaries, so we can skip preserving nearby
	 * data if a transfer appears to overlap cachelines.  An mbuf in
	 * particular will usually appear to be overlapped because of offsetting
	 * within the buffer to align the L3 headers, but we know that the bytes
	 * preceding that offset are part of the same mbuf memory and are not
	 * unrelated adjacent data (and a rule of mbuf handling is that the CPU
	 * is not allowed to touch the mbuf while DMA is in progress, including
	 * header fields).
	 */
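	/*
	 * Worked example (illustrative): with a 32-byte cache line,
	 * buf == 0x80001234 and len == 0x100, buf_cl == 0x80001220 and
	 * size_cl == 0x14 (the 20 bytes that precede the buffer in its
	 * first line), while buf_clend == 0x80001334 and size_clend ==
	 * 0xc (the 12 bytes that follow the buffer in its last line);
	 * those two fragments are what gets saved and restored around
	 * the invalidation below.
	 */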
	if (aligned) {
		size_cl = 0;
		size_clend = 0;
	} else {
		buf_cl = buf & ~cache_linesize_mask;
		size_cl = buf & cache_linesize_mask;
		buf_clend = buf + len;
		size_clend = (mips_pdcache_linesize -
		    (buf_clend & cache_linesize_mask)) & cache_linesize_mask;
	}

	switch (op) {
	case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
	case BUS_DMASYNC_POSTREAD:

		/*
		 * Save buffers that might be modified by invalidation
		 */
		if (size_cl)
			memcpy(tmp_cl, (void *)buf_cl, size_cl);
		if (size_clend)
			memcpy(tmp_clend, (void *)buf_clend, size_clend);
		mips_dcache_inv_range(buf, len);
		/*
		 * Restore them
		 */
		if (size_cl)
			memcpy((void *)buf_cl, tmp_cl, size_cl);
		if (size_clend)
			memcpy((void *)buf_clend, tmp_clend, size_clend);
		/*
		 * Copies above have brought corresponding memory
		 * cache lines back into dirty state. Write them back
		 * out and invalidate affected cache lines again if
		 * necessary.
		 */
		if (size_cl)
			mips_dcache_wbinv_range(buf_cl, size_cl);
		if (size_clend && (size_cl == 0 ||
		    buf_clend - buf_cl > mips_pdcache_linesize))
			mips_dcache_wbinv_range(buf_clend, size_clend);
		break;

	case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
		mips_dcache_wbinv_range(buf, len);
		break;

	case BUS_DMASYNC_PREREAD:
		/*
		 * Save buffers that might be modified by invalidation
		 */
		if (size_cl)
			memcpy(tmp_cl, (void *)buf_cl, size_cl);
		if (size_clend)
			memcpy(tmp_clend, (void *)buf_clend, size_clend);
		mips_dcache_inv_range(buf, len);
		/*
		 * Restore them
		 */
		if (size_cl)
			memcpy((void *)buf_cl, tmp_cl, size_cl);
		if (size_clend)
			memcpy((void *)buf_clend, tmp_clend, size_clend);
		/*
		 * Copies above have brought corresponding memory
		 * cache lines back into dirty state. Write them back
		 * out and invalidate affected cache lines again if
		 * necessary.
		 */
		if (size_cl)
			mips_dcache_wbinv_range(buf_cl, size_cl);
		if (size_clend && (size_cl == 0 ||
		    buf_clend - buf_cl > mips_pdcache_linesize))
			mips_dcache_wbinv_range(buf_clend, size_clend);
		break;

	case BUS_DMASYNC_PREWRITE:
		mips_dcache_wb_range(buf, len);
		break;
	}
}

static void
_bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	STAILQ_FOREACH(bpage, &map->bpages, links) {
		if (op & BUS_DMASYNC_PREWRITE) {
			if (bpage->datavaddr != 0)
				bcopy((void *)bpage->datavaddr,
				    (void *)(bpage->vaddr_nocache != 0 ?
					     bpage->vaddr_nocache :
					     bpage->vaddr),
				    bpage->datacount);
			else
				physcopyout(bpage->dataaddr,
				    (void *)(bpage->vaddr_nocache != 0 ?
					     bpage->vaddr_nocache :
					     bpage->vaddr),
				    bpage->datacount);
			if (bpage->vaddr_nocache == 0) {
				mips_dcache_wb_range(bpage->vaddr,
				    bpage->datacount);
			}
			dmat->bounce_zone->total_bounced++;
		}
		if (op & BUS_DMASYNC_POSTREAD) {
			if (bpage->vaddr_nocache == 0) {
				mips_dcache_inv_range(bpage->vaddr,
				    bpage->datacount);
			}
			if (bpage->datavaddr != 0)
				bcopy((void *)(bpage->vaddr_nocache != 0 ?
				    bpage->vaddr_nocache : bpage->vaddr),
				    (void *)bpage->datavaddr, bpage->datacount);
			else
				physcopyin((void *)(bpage->vaddr_nocache != 0 ?
				    bpage->vaddr_nocache : bpage->vaddr),
				    bpage->dataaddr, bpage->datacount);
			dmat->bounce_zone->total_bounced++;
		}
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct sync_list *sl, *end;
	int aligned;

	if (op == BUS_DMASYNC_POSTWRITE)
		return;
	if (STAILQ_FIRST(&map->bpages))
		_bus_dmamap_sync_bp(dmat, map, op);

	if ((dmat->flags & BUS_DMA_COHERENT) ||
	    (map->flags & DMAMAP_UNCACHEABLE)) {
		if (op & BUS_DMASYNC_PREWRITE)
			mips_sync();
		return;
	}

	aligned = (map->flags & DMAMAP_CACHE_ALIGNED) ? 1 : 0;

	CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
	if (map->sync_count) {
		end = &map->slist[map->sync_count];
		for (sl = &map->slist[0]; sl != end; sl++)
			bus_dmamap_sync_buf(sl->vaddr, sl->datacount, op,
			    aligned);
	}
}
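
/*
 * Example usage ordering (illustrative): a driver reading from a device
 * into a loaded buffer brackets the transfer with
 *
 *	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
 *	... start the DMA and wait for it to complete ...
 *	bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
 *
 * so that the cache invalidation and any bounce-page copies performed
 * above happen before the device writes the memory and before the CPU
 * reads the data, respectively.
 */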

static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		 && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_BUSDMA,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
	bz->map_count = 0;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, "");

	return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_BUSDMA,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_BOUNCE,
							 M_NOWAIT, 0ul,
							 bz->lowaddr,
							 PAGE_SIZE,
							 0);
		if (bpage->vaddr == 0) {
			free(bpage, M_BUSDMA);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		bpage->vaddr_nocache =
		    (vm_offset_t)pmap_mapdev(bpage->busaddr, PAGE_SIZE);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_addr_t addr, bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/* Page offset needs to be preserved. */
		bpage->vaddr |= addr & PAGE_MASK;
		bpage->busaddr |= addr & PAGE_MASK;
	}
	bpage->datavaddr = vaddr;
	bpage->dataaddr = addr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;
	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/*
		 * Reset the bounce page to start at offset 0.  Other uses
		 * of this bounce page may need to store a full page of
		 * data and/or assume it starts on a page boundary.
		 */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
	}

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
		    map->callback_arg, BUS_DMA_WAITOK);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}
