busdma_machdep.c revision 257228
1/*-
2 * Copyright (c) 2006 Oleksandr Tymoshenko
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions, and the following disclaimer,
10 *    without modification, immediately at the beginning of the file.
11 * 2. The name of the author may not be used to endorse or promote products
12 *    derived from this software without specific prior written permission.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
18 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 *  From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
27 */
28
29#include <sys/cdefs.h>
30__FBSDID("$FreeBSD: head/sys/mips/mips/busdma_machdep.c 257228 2013-10-27 21:39:16Z kib $");
31
32/*
33 * MIPS bus dma support routines
34 */
35
36#include <sys/param.h>
37#include <sys/systm.h>
38#include <sys/malloc.h>
39#include <sys/bus.h>
40#include <sys/interrupt.h>
41#include <sys/lock.h>
42#include <sys/proc.h>
43#include <sys/memdesc.h>
44#include <sys/mutex.h>
45#include <sys/ktr.h>
46#include <sys/kernel.h>
47#include <sys/sysctl.h>
48#include <sys/uio.h>
49
50#include <vm/vm.h>
51#include <vm/vm_page.h>
52#include <vm/vm_map.h>
53
54#include <machine/atomic.h>
55#include <machine/bus.h>
56#include <machine/cache.h>
57#include <machine/cpufunc.h>
58#include <machine/cpuinfo.h>
59#include <machine/md_var.h>
60
61#define MAX_BPAGES 64
62#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
63#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4
64
65struct bounce_zone;
66
67struct bus_dma_tag {
68	bus_dma_tag_t		parent;
69	bus_size_t		alignment;
70	bus_addr_t		boundary;
71	bus_addr_t		lowaddr;
72	bus_addr_t		highaddr;
73	bus_dma_filter_t	*filter;
74	void			*filterarg;
75	bus_size_t		maxsize;
76	u_int			nsegments;
77	bus_size_t		maxsegsz;
78	int			flags;
79	int			ref_count;
80	int			map_count;
81	bus_dma_lock_t		*lockfunc;
82	void			*lockfuncarg;
83	bus_dma_segment_t	*segments;
84	struct bounce_zone *bounce_zone;
85};
86
87struct bounce_page {
88	vm_offset_t	vaddr;		/* kva of bounce buffer */
89	vm_offset_t	vaddr_nocache;	/* kva of bounce buffer uncached */
90	bus_addr_t	busaddr;	/* Physical address */
91	vm_offset_t	datavaddr;	/* kva of client data */
92	bus_addr_t	dataaddr;	/* client physical address */
93	bus_size_t	datacount;	/* client data count */
94	STAILQ_ENTRY(bounce_page) links;
95};
96
97struct sync_list {
98	vm_offset_t	vaddr;		/* kva of bounce buffer */
99	bus_addr_t	busaddr;	/* Physical address */
100	bus_size_t	datacount;	/* client data count */
101};
102
103int busdma_swi_pending;
104
105struct bounce_zone {
106	STAILQ_ENTRY(bounce_zone) links;
107	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
108	int		total_bpages;
109	int		free_bpages;
110	int		reserved_bpages;
111	int		active_bpages;
112	int		total_bounced;
113	int		total_deferred;
114	int		map_count;
115	bus_size_t	alignment;
116	bus_addr_t	lowaddr;
117	char		zoneid[8];
118	char		lowaddrid[20];
119	struct sysctl_ctx_list sysctl_tree;
120	struct sysctl_oid *sysctl_tree_top;
121};
122
123static struct mtx bounce_lock;
124static int total_bpages;
125static int busdma_zonecount;
126static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
127
128static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
129SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
130	   "Total bounce pages");
131
132#define DMAMAP_UNCACHEABLE	0x8
133#define DMAMAP_ALLOCATED	0x10
134#define DMAMAP_MALLOCUSED	0x20
135
136struct bus_dmamap {
137	struct bp_list	bpages;
138	int		pagesneeded;
139	int		pagesreserved;
140	bus_dma_tag_t	dmat;
141	struct memdesc	mem;
142	int		flags;
143	void		*origbuffer;
144	void		*allocbuffer;
145	TAILQ_ENTRY(bus_dmamap)	freelist;
146	STAILQ_ENTRY(bus_dmamap) links;
147	bus_dmamap_callback_t *callback;
148	void		*callback_arg;
149	int		sync_count;
150	struct sync_list *slist;
151};
152
153static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
154static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
155
156static TAILQ_HEAD(,bus_dmamap) dmamap_freelist =
157	TAILQ_HEAD_INITIALIZER(dmamap_freelist);
158
159#define BUSDMA_STATIC_MAPS	128
160static struct bus_dmamap map_pool[BUSDMA_STATIC_MAPS];
161
162static struct mtx busdma_mtx;
163
164MTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF);
165
166static void init_bounce_pages(void *dummy);
167static int alloc_bounce_zone(bus_dma_tag_t dmat);
168static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
169static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
170				int commit);
171static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
172				  vm_offset_t vaddr, bus_addr_t addr,
173				  bus_size_t size);
174static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
175
176/* Default tag, as most drivers provide no parent tag. */
177bus_dma_tag_t mips_root_dma_tag;
178
179/*
180 * Return true if a match is made.
181 *
182 * To find a match, walk the chain of bus_dma_tag_t's looking for 'paddr'.
183 *
184 * If paddr is within the bounds of the dma tag then call the filter callback
185 * to check for a match; if there is no filter callback then assume a match.
186 */
187static int
188run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
189{
190	int retval;
191
192	retval = 0;
193
194	do {
195		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
196		 || ((paddr & (dmat->alignment - 1)) != 0))
197		 && (dmat->filter == NULL
198		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
199			retval = 1;
200
201		dmat = dmat->parent;
202	} while (retval == 0 && dmat != NULL);
203	return (retval);
204}
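
/*
 * Example (illustrative sketch, not part of the original source): a driver
 * may install a filter callback via bus_dma_tag_create().  As run_filter()
 * above shows, a non-zero return from the callback means "bounce this
 * address", while returning zero accepts an address that falls inside the
 * exclusion window.  The 28-bit limit below is a hypothetical device quirk.
 *
 *	static int
 *	mydev_dma_filter(void *arg, bus_addr_t paddr)
 *	{
 *
 *		return ((paddr & ~(bus_addr_t)0x0fffffff) != 0);
 *	}
 */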
205
206static void
207mips_dmamap_freelist_init(void *dummy)
208{
209	int i;
210
211	for (i = 0; i < BUSDMA_STATIC_MAPS; i++)
212		TAILQ_INSERT_HEAD(&dmamap_freelist, &map_pool[i], freelist);
213}
214
215SYSINIT(busdma, SI_SUB_VM, SI_ORDER_ANY, mips_dmamap_freelist_init, NULL);
216
217/*
218 * Check whether any physical memory falls inside the given exclusion range.
219 */
220
221static __inline int
222_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
223{
224	int i;
225	for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
226		if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1])
227		    || (lowaddr < phys_avail[i] &&
228		    highaddr > phys_avail[i]))
229			return (1);
230	}
231	return (0);
232}
233
234/*
235 * Convenience function for manipulating driver locks from busdma (during
236 * busdma_swi, for example).  Drivers that don't provide their own locks
237 * should pass &Giant as dmat->lockfuncarg.  Drivers that use their own
238 * non-mutex locking scheme don't have to use this at all.
239 */
240void
241busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
242{
243	struct mtx *dmtx;
244
245	dmtx = (struct mtx *)arg;
246	switch (op) {
247	case BUS_DMA_LOCK:
248		mtx_lock(dmtx);
249		break;
250	case BUS_DMA_UNLOCK:
251		mtx_unlock(dmtx);
252		break;
253	default:
254		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
255	}
256}
257
258/*
259 * dflt_lock should never get called.  It gets put into the dma tag when
260 * lockfunc == NULL, which is only valid if the maps that are associated
261 * with the tag are meant to never be deferred.
262 * XXX Should have a way to identify which driver is responsible here.
263 */
264static void
265dflt_lock(void *arg, bus_dma_lock_op_t op)
266{
267#ifdef INVARIANTS
268	panic("driver error: busdma dflt_lock called");
269#else
270	printf("DRIVER_ERROR: busdma dflt_lock called\n");
271#endif
272}
273
274static __inline bus_dmamap_t
275_busdma_alloc_dmamap(bus_dma_tag_t dmat)
276{
277	struct sync_list *slist;
278	bus_dmamap_t map;
279
280	slist = malloc(sizeof(*slist) * dmat->nsegments, M_DEVBUF, M_NOWAIT);
281	if (slist == NULL)
282		return (NULL);
283	mtx_lock(&busdma_mtx);
284	map = TAILQ_FIRST(&dmamap_freelist);
285	if (map)
286		TAILQ_REMOVE(&dmamap_freelist, map, freelist);
287	mtx_unlock(&busdma_mtx);
288	if (!map) {
289		map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT | M_ZERO);
290		if (map)
291			map->flags = DMAMAP_ALLOCATED;
292	} else
293		map->flags = 0;
294	if (map != NULL) {
295		STAILQ_INIT(&map->bpages);
296		map->slist = slist;
297	} else
298		free(slist, M_DEVBUF);
299	return (map);
300}
301
302static __inline void
303_busdma_free_dmamap(bus_dmamap_t map)
304{
305	free(map->slist, M_DEVBUF);
306	if (map->flags & DMAMAP_ALLOCATED)
307		free(map, M_DEVBUF);
308	else {
309		mtx_lock(&busdma_mtx);
310		TAILQ_INSERT_HEAD(&dmamap_freelist, map, freelist);
311		mtx_unlock(&busdma_mtx);
312	}
313}
314
315/*
316 * Allocate a device specific dma_tag.
317 */
318#define SEG_NB 1024
319
320int
321bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
322    bus_addr_t boundary, bus_addr_t lowaddr,
323    bus_addr_t highaddr, bus_dma_filter_t *filter,
324    void *filterarg, bus_size_t maxsize, int nsegments,
325    bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
326    void *lockfuncarg, bus_dma_tag_t *dmat)
327{
328	bus_dma_tag_t newtag;
329	int error = 0;
330	/* Return a NULL tag on failure */
331	*dmat = NULL;
332	if (!parent)
333		parent = mips_root_dma_tag;
334
335	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
336	if (newtag == NULL) {
337		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
338		    __func__, newtag, 0, error);
339		return (ENOMEM);
340	}
341
342	newtag->parent = parent;
343	newtag->alignment = alignment;
344	newtag->boundary = boundary;
345	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
346	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
347	newtag->filter = filter;
348	newtag->filterarg = filterarg;
349	newtag->maxsize = maxsize;
350	newtag->nsegments = nsegments;
351	newtag->maxsegsz = maxsegsz;
352	newtag->flags = flags;
353	if (cpuinfo.cache_coherent_dma)
354		newtag->flags |= BUS_DMA_COHERENT;
355	newtag->ref_count = 1; /* Count ourself */
356	newtag->map_count = 0;
357	if (lockfunc != NULL) {
358		newtag->lockfunc = lockfunc;
359		newtag->lockfuncarg = lockfuncarg;
360	} else {
361		newtag->lockfunc = dflt_lock;
362		newtag->lockfuncarg = NULL;
363	}
364	newtag->segments = NULL;
365
366	/*
367	 * Take into account any restrictions imposed by our parent tag
368	 */
369	if (parent != NULL) {
370		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
371		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
372		if (newtag->boundary == 0)
373			newtag->boundary = parent->boundary;
374		else if (parent->boundary != 0)
375			newtag->boundary =
376			    MIN(parent->boundary, newtag->boundary);
377		if ((newtag->filter != NULL) ||
378		    ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
379			newtag->flags |= BUS_DMA_COULD_BOUNCE;
380		if (newtag->filter == NULL) {
381			/*
382			* Short circuit looking at our parent directly
383			* since we have encapsulated all of its information
384			*/
385			newtag->filter = parent->filter;
386			newtag->filterarg = parent->filterarg;
387			newtag->parent = parent->parent;
388		}
389		if (newtag->parent != NULL)
390			atomic_add_int(&parent->ref_count, 1);
391	}
392	if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr)
393	 || newtag->alignment > 1)
394		newtag->flags |= BUS_DMA_COULD_BOUNCE;
395
396	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
397	    (flags & BUS_DMA_ALLOCNOW) != 0) {
398		struct bounce_zone *bz;
399
400		/* Must bounce */
401
402		if ((error = alloc_bounce_zone(newtag)) != 0) {
403			free(newtag, M_DEVBUF);
404			return (error);
405		}
406		bz = newtag->bounce_zone;
407
408		if (ptoa(bz->total_bpages) < maxsize) {
409			int pages;
410
411			pages = atop(maxsize) - bz->total_bpages;
412
413			/* Add pages to our bounce pool */
414			if (alloc_bounce_pages(newtag, pages) < pages)
415				error = ENOMEM;
416		}
417		/* Performed initial allocation */
418		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
419	} else
420		newtag->bounce_zone = NULL;
421	if (error != 0)
422		free(newtag, M_DEVBUF);
423	else
424		*dmat = newtag;
425	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
426	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
427
428	return (error);
429}
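
/*
 * Example (illustrative sketch, not part of the original source): a driver
 * typically creates one tag per kind of DMA object.  The arguments follow
 * the signature above (parent, alignment, boundary, lowaddr, highaddr,
 * filter, filterarg, maxsize, nsegments, maxsegsz, flags, lockfunc,
 * lockfuncarg, dmat).  Using BUS_SPACE_MAXADDR_32BIT as lowaddr is the
 * common idiom for bouncing anything above 4 GB; the softc fields are
 * hypothetical, and busdma_lock_mutex with a driver mutex handles deferred
 * callbacks.
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    MCLBYTES, 1, MCLBYTES, 0,
 *	    busdma_lock_mutex, &sc->sc_mtx, &sc->sc_dmat);
 */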
430
431int
432bus_dma_tag_destroy(bus_dma_tag_t dmat)
433{
434#ifdef KTR
435	bus_dma_tag_t dmat_copy = dmat;
436#endif
437
438	if (dmat != NULL) {
439		if (dmat->map_count != 0)
440			return (EBUSY);
441
442		while (dmat != NULL) {
443			bus_dma_tag_t parent;
444
445			parent = dmat->parent;
446			atomic_subtract_int(&dmat->ref_count, 1);
447			if (dmat->ref_count == 0) {
448				if (dmat->segments != NULL)
449					free(dmat->segments, M_DEVBUF);
450				free(dmat, M_DEVBUF);
451				/*
452				 * Last reference count, so
453				 * release our reference
454				 * count on our parent.
455				 */
456				dmat = parent;
457			} else
458				dmat = NULL;
459		}
460	}
461	CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy);
462
463	return (0);
464}
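
/*
 * Example (illustrative sketch): teardown normally proceeds from the most
 * specific object back to the tag, since the EBUSY check above refuses to
 * destroy a tag that still has maps outstanding.  Names are hypothetical.
 *
 *	bus_dmamap_unload(sc->sc_dmat, sc->sc_map);
 *	bus_dmamap_destroy(sc->sc_dmat, sc->sc_map);
 *	bus_dma_tag_destroy(sc->sc_dmat);
 */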
465
466#include <sys/kdb.h>
467/*
468 * Allocate a handle for mapping from kva/uva/physical
469 * address space into bus device space.
470 */
471int
472bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
473{
474	bus_dmamap_t newmap;
475	int error = 0;
476
477	if (dmat->segments == NULL) {
478		dmat->segments = (bus_dma_segment_t *)malloc(
479		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
480		    M_NOWAIT);
481		if (dmat->segments == NULL) {
482			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
483			    __func__, dmat, ENOMEM);
484			return (ENOMEM);
485		}
486	}
487
488	newmap = _busdma_alloc_dmamap(dmat);
489	if (newmap == NULL) {
490		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
491		return (ENOMEM);
492	}
493	*mapp = newmap;
494	newmap->dmat = dmat;
495	newmap->allocbuffer = NULL;
496	newmap->sync_count = 0;
497	dmat->map_count++;
498
499	/*
500	 * Bouncing might be required if the driver asks for an active
501	 * exclusion region, a data alignment that is stricter than 1, and/or
502	 * an active address boundary.
503	 */
504	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
505
506		/* Must bounce */
507		struct bounce_zone *bz;
508		int maxpages;
509
510		if (dmat->bounce_zone == NULL) {
511			if ((error = alloc_bounce_zone(dmat)) != 0) {
512				_busdma_free_dmamap(newmap);
513				*mapp = NULL;
514				return (error);
515			}
516		}
517		bz = dmat->bounce_zone;
518
519		/* Initialize the new map */
520		STAILQ_INIT(&((*mapp)->bpages));
521
522		/*
523		 * Attempt to add pages to our pool on a per-instance
524		 * basis up to a sane limit.
525		 */
526		maxpages = MAX_BPAGES;
527		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
528		 || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
529			int pages;
530
531			pages = MAX(atop(dmat->maxsize), 1);
532			pages = MIN(maxpages - bz->total_bpages, pages);
533			pages = MAX(pages, 1);
534			if (alloc_bounce_pages(dmat, pages) < pages)
535				error = ENOMEM;
536
537			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
538				if (error == 0)
539					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
540			} else {
541				error = 0;
542			}
543		}
544		bz->map_count++;
545	}
546
547	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
548	    __func__, dmat, dmat->flags, error);
549
550	return (0);
551}
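
/*
 * Example (illustrative sketch): a map created here is normally reused for
 * many transfers.  The MI bus_dmamap_load() wrapper ends up in
 * _bus_dmamap_load_buffer() below and reports the resulting segments
 * through a driver callback.  Callback and softc names are hypothetical.
 *
 *	static void
 *	mydev_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *
 *		if (error == 0)
 *			*(bus_addr_t *)arg = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_create(sc->sc_dmat, 0, &sc->sc_map);
 *	if (error == 0)
 *		error = bus_dmamap_load(sc->sc_dmat, sc->sc_map, buf, len,
 *		    mydev_load_cb, &busaddr, BUS_DMA_NOWAIT);
 */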
552
553/*
554 * Destroy a handle for mapping from kva/uva/physical
555 * address space into bus device space.
556 */
557int
558bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
559{
560
561	if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
562		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
563		    __func__, dmat, EBUSY);
564		return (EBUSY);
565	}
566	if (dmat->bounce_zone)
567		dmat->bounce_zone->map_count--;
568	dmat->map_count--;
569	_busdma_free_dmamap(map);
570	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
571	return (0);
572}
573
574/*
575 * Allocate a piece of memory that can be efficiently mapped into
576 * bus device space based on the constraints listed in the dma tag.
577 * A dmamap for use with dmamap_load is also allocated.
578 */
579int
580bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
581    bus_dmamap_t *mapp)
582{
583	bus_dmamap_t newmap = NULL;
584
585	int mflags;
586
587	if (flags & BUS_DMA_NOWAIT)
588		mflags = M_NOWAIT;
589	else
590		mflags = M_WAITOK;
591	if (dmat->segments == NULL) {
592		dmat->segments = (bus_dma_segment_t *)malloc(
593		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
594		    mflags);
595		if (dmat->segments == NULL) {
596			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
597			    __func__, dmat, dmat->flags, ENOMEM);
598			return (ENOMEM);
599		}
600	}
601	if (flags & BUS_DMA_ZERO)
602		mflags |= M_ZERO;
603
604	newmap = _busdma_alloc_dmamap(dmat);
605	if (newmap == NULL) {
606		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
607		    __func__, dmat, dmat->flags, ENOMEM);
608		return (ENOMEM);
609	}
610	dmat->map_count++;
611	*mapp = newmap;
612	newmap->dmat = dmat;
613	newmap->sync_count = 0;
614
615	/*
616	 * If all the memory is coherent with DMA then we don't need to
617	 * do anything special for a coherent mapping request.
618	 */
619	if (dmat->flags & BUS_DMA_COHERENT)
620	    flags &= ~BUS_DMA_COHERENT;
621
622	/*
623	 * Allocate uncacheable memory if all else fails.
624	 */
625	if (flags & BUS_DMA_COHERENT)
626	    newmap->flags |= DMAMAP_UNCACHEABLE;
627
628	if (dmat->maxsize <= PAGE_SIZE &&
629	   (dmat->alignment < dmat->maxsize) &&
630	   !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr) &&
631	   !(newmap->flags & DMAMAP_UNCACHEABLE)) {
632		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
633		newmap->flags |= DMAMAP_MALLOCUSED;
634	} else {
635		/*
636		 * XXX Use Contigmalloc until it is merged into this facility
637		 *     and handles multi-seg allocations.  Nobody is doing
638		 *     multi-seg allocations yet though.
639		 */
640		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
641		    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
642		    dmat->boundary);
643	}
644	if (*vaddr == NULL) {
645		if (newmap != NULL) {
646			_busdma_free_dmamap(newmap);
647			dmat->map_count--;
648		}
649		*mapp = NULL;
650		return (ENOMEM);
651	}
652
653	if (newmap->flags & DMAMAP_UNCACHEABLE) {
654		void *tmpaddr = (void *)*vaddr;
655
656		if (tmpaddr) {
657			tmpaddr = (void *)pmap_mapdev(vtophys(tmpaddr),
658			    dmat->maxsize);
659			newmap->origbuffer = *vaddr;
660			newmap->allocbuffer = tmpaddr;
661			mips_dcache_wbinv_range((vm_offset_t)*vaddr,
662			    dmat->maxsize);
663			*vaddr = tmpaddr;
664		} else
665			newmap->origbuffer = newmap->allocbuffer = NULL;
666	} else
667		newmap->origbuffer = newmap->allocbuffer = NULL;
668
669	return (0);
670}
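
/*
 * Example (illustrative sketch): long-lived control structures such as
 * descriptor rings are usually obtained through this routine.  On platforms
 * without coherent DMA, BUS_DMA_COHERENT yields an uncached mapping (see
 * DMAMAP_UNCACHEABLE above).  RING_SIZE and the softc fields are
 * hypothetical; mydev_load_cb is the callback sketched earlier.
 *
 *	error = bus_dmamem_alloc(sc->sc_ring_dmat, (void **)&sc->sc_ring,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
 *	    &sc->sc_ring_map);
 *	if (error == 0)
 *		error = bus_dmamap_load(sc->sc_ring_dmat, sc->sc_ring_map,
 *		    sc->sc_ring, RING_SIZE, mydev_load_cb,
 *		    &sc->sc_ring_busaddr, BUS_DMA_NOWAIT);
 */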
671
672/*
673 * Free a piece of memory and its allocated dmamap, which were allocated
674 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
675 */
676void
677bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
678{
679	if (map->allocbuffer) {
680		KASSERT(map->allocbuffer == vaddr,
681		    ("Trying to free the wrong DMA buffer"));
682		vaddr = map->origbuffer;
683	}
684
685	if (map->flags & DMAMAP_UNCACHEABLE)
686		pmap_unmapdev((vm_offset_t)map->allocbuffer, dmat->maxsize);
687	if (map->flags & DMAMAP_MALLOCUSED)
688		free(vaddr, M_DEVBUF);
689	else
690		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
691
692	dmat->map_count--;
693	_busdma_free_dmamap(map);
694	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
695}
696
697static void
698_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
699    bus_size_t buflen, int flags)
700{
701	bus_addr_t curaddr;
702	bus_size_t sgsize;
703
704	if ((map->pagesneeded == 0)) {
705		CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
706		    dmat->lowaddr, dmat->boundary, dmat->alignment);
707		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
708		    map, map->pagesneeded);
709		/*
710		 * Count the number of bounce pages
711		 * needed in order to complete this transfer
712		 */
713		curaddr = buf;
714		while (buflen != 0) {
715			sgsize = MIN(buflen, dmat->maxsegsz);
716			if (run_filter(dmat, curaddr) != 0) {
717				sgsize = MIN(sgsize, PAGE_SIZE);
718				map->pagesneeded++;
719			}
720			curaddr += sgsize;
721			buflen -= sgsize;
722		}
723		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
724	}
725}
726
727static void
728_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
729    void *buf, bus_size_t buflen, int flags)
730{
731	vm_offset_t vaddr;
732	vm_offset_t vendaddr;
733	bus_addr_t paddr;
734
735	if ((map->pagesneeded == 0)) {
736		CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
737		    dmat->lowaddr, dmat->boundary, dmat->alignment);
738		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
739		    map, map->pagesneeded);
740		/*
741		 * Count the number of bounce pages
742		 * needed in order to complete this transfer
743		 */
744		vaddr = (vm_offset_t)buf;
745		vendaddr = (vm_offset_t)buf + buflen;
746
747		while (vaddr < vendaddr) {
748			bus_size_t sg_len;
749
750			KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
751			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
752			paddr = pmap_kextract(vaddr);
753			if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
754			    run_filter(dmat, paddr) != 0) {
755				sg_len = roundup2(sg_len, dmat->alignment);
756				map->pagesneeded++;
757			}
758			vaddr += sg_len;
759		}
760		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
761	}
762}
763
764static int
765_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map,int flags)
766{
767
768	/* Reserve Necessary Bounce Pages */
769	mtx_lock(&bounce_lock);
770	if (flags & BUS_DMA_NOWAIT) {
771		if (reserve_bounce_pages(dmat, map, 0) != 0) {
772			mtx_unlock(&bounce_lock);
773			return (ENOMEM);
774		}
775	} else {
776		if (reserve_bounce_pages(dmat, map, 1) != 0) {
777			/* Queue us for resources */
778			STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
779			    map, links);
780			mtx_unlock(&bounce_lock);
781			return (EINPROGRESS);
782		}
783	}
784	mtx_unlock(&bounce_lock);
785
786	return (0);
787}
788
789/*
790 * Add a single contiguous physical range to the segment list.
791 */
792static int
793_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr,
794    bus_size_t sgsize, bus_dma_segment_t *segs, int *segp)
795{
796	bus_addr_t baddr, bmask;
797	int seg;
798
799	/*
800	 * Make sure we don't cross any boundaries.
801	 */
802	bmask = ~(dmat->boundary - 1);
803	if (dmat->boundary > 0) {
804		baddr = (curaddr + dmat->boundary) & bmask;
805		if (sgsize > (baddr - curaddr))
806			sgsize = (baddr - curaddr);
807	}
808	/*
809	 * Insert chunk into a segment, coalescing with
810	 * the previous segment if possible.
811	 */
812	seg = *segp;
813	if (seg >= 0 &&
814	    curaddr == segs[seg].ds_addr + segs[seg].ds_len &&
815	    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
816	    (dmat->boundary == 0 ||
817	     (segs[seg].ds_addr & bmask) == (curaddr & bmask))) {
818		segs[seg].ds_len += sgsize;
819	} else {
820		if (++seg >= dmat->nsegments)
821			return (0);
822		segs[seg].ds_addr = curaddr;
823		segs[seg].ds_len = sgsize;
824	}
825	*segp = seg;
826	return (sgsize);
827}
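
/*
 * Worked example (illustrative, with made-up numbers): with a boundary of
 * 0x10000, curaddr = 0x1fff0 and sgsize = 0x100, bmask is ~0xffff and baddr
 * becomes (0x1fff0 + 0x10000) & bmask = 0x20000, so sgsize is clipped to
 * 0x20000 - 0x1fff0 = 0x10; the remaining 0xf0 bytes start a new segment on
 * the caller's next pass through its loop.
 */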
828
829/*
830 * Utility function to load a physical buffer.  segp contains
831 * the starting segment on entrance, and the ending segment on exit.
832 */
833int
834_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
835    vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
836    int *segp)
837{
838	bus_addr_t curaddr;
839	bus_size_t sgsize;
840	int error;
841
842	if (segs == NULL)
843		segs = dmat->segments;
844
845	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
846		_bus_dmamap_count_phys(dmat, map, buf, buflen, flags);
847		if (map->pagesneeded != 0) {
848			error = _bus_dmamap_reserve_pages(dmat, map, flags);
849			if (error)
850				return (error);
851		}
852	}
853
854	while (buflen > 0) {
855		curaddr = buf;
856		sgsize = MIN(buflen, dmat->maxsegsz);
857		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
858		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
859			sgsize = MIN(sgsize, PAGE_SIZE);
860			curaddr = add_bounce_page(dmat, map, 0, curaddr,
861			    sgsize);
862		}
863		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
864		    segp);
865		if (sgsize == 0)
866			break;
867		buf += sgsize;
868		buflen -= sgsize;
869	}
870
871	/*
872	 * Did we fit?
873	 */
874	if (buflen != 0) {
875		_bus_dmamap_unload(dmat, map);
876		return (EFBIG); /* XXX better return value here? */
877	}
878	return (0);
879}
880
881int
882_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map,
883    struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags,
884    bus_dma_segment_t *segs, int *segp)
885{
886
887	return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags,
888	    segs, segp));
889}
890
891/*
892 * Utility function to load a linear buffer.  segp contains
893 * the starting segment on entrance, and the ending segment on exit.
895 */
896int
897_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
898    bus_size_t buflen, struct pmap *pmap, int flags, bus_dma_segment_t *segs,
899    int *segp)
900{
901	bus_size_t sgsize;
902	bus_addr_t curaddr;
903	struct sync_list *sl;
904	vm_offset_t vaddr = (vm_offset_t)buf;
905	int error = 0;
906
907
908	if (segs == NULL)
909		segs = dmat->segments;
910
911	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
912		_bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
913		if (map->pagesneeded != 0) {
914			error = _bus_dmamap_reserve_pages(dmat, map, flags);
915			if (error)
916				return (error);
917		}
918	}
919	CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
920	    "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);
921
922	while (buflen > 0) {
923		/*
924		 * Get the physical address for this segment.
925		 *
926		 * XXX Don't support checking for coherent mappings
927		 * XXX in user address space.
928		 */
929		KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
930		curaddr = pmap_kextract(vaddr);
931
932		/*
933		 * Compute the segment size, and adjust counts.
934		 */
935		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
936		if (sgsize > dmat->maxsegsz)
937			sgsize = dmat->maxsegsz;
938		if (buflen < sgsize)
939			sgsize = buflen;
940
941		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
942		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
943			curaddr = add_bounce_page(dmat, map, vaddr, curaddr,
944			    sgsize);
945		} else {
946			sl = &map->slist[map->sync_count - 1];
947			if (map->sync_count == 0 ||
948			    vaddr != sl->vaddr + sl->datacount) {
949				if (++map->sync_count > dmat->nsegments)
950					goto cleanup;
951				sl++;
952				sl->vaddr = vaddr;
953				sl->datacount = sgsize;
954				sl->busaddr = curaddr;
955			} else
956				sl->datacount += sgsize;
957		}
958		sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
959		    segp);
960		if (sgsize == 0)
961			break;
962		vaddr += sgsize;
963		buflen -= sgsize;
964	}
965
966cleanup:
967	/*
968	 * Did we fit?
969	 */
970	if (buflen != 0) {
971		_bus_dmamap_unload(dmat, map);
972		error = EFBIG; /* XXX better return value here? */
973	}
974	return (error);
975}
976
977void
978__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
979    struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
980{
981
982	KASSERT(dmat != NULL, ("dmatag is NULL"));
983	KASSERT(map != NULL, ("dmamap is NULL"));
984	map->mem = *mem;
985	map->callback = callback;
986	map->callback_arg = callback_arg;
987}
988
989bus_dma_segment_t *
990_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
991    bus_dma_segment_t *segs, int nsegs, int error)
992{
993
994	if (segs == NULL)
995		segs = dmat->segments;
996	return (segs);
997}
998
999/*
1000 * Release the mapping held by map.
1001 */
1002void
1003_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
1004{
1005	struct bounce_page *bpage;
1006
1007	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
1008		STAILQ_REMOVE_HEAD(&map->bpages, links);
1009		free_bounce_page(dmat, bpage);
1010	}
1011	map->sync_count = 0;
1012	return;
1013}
1014
1015static void
1016bus_dmamap_sync_buf(vm_offset_t buf, int len, bus_dmasync_op_t op)
1017{
1018	char tmp_cl[mips_pdcache_linesize], tmp_clend[mips_pdcache_linesize];
1019	vm_offset_t buf_cl, buf_clend;
1020	vm_size_t size_cl, size_clend;
1021	int cache_linesize_mask = mips_pdcache_linesize - 1;
1022
1023	/*
1024	 * dcache invalidation operates on cache line aligned addresses
1025	 * and could modify areas of memory that share the same cache line
1026	 * at the beginning and the end of the buffer. In order to
1027	 * prevent data loss we save these chunks in a temporary buffer
1028	 * before the invalidation and restore them after it.
1029	 */
1030	buf_cl = buf & ~cache_linesize_mask;
1031	size_cl = buf & cache_linesize_mask;
1032	buf_clend = buf + len;
1033	size_clend = (mips_pdcache_linesize -
1034	    (buf_clend & cache_linesize_mask)) & cache_linesize_mask;
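
	/*
	 * Worked example (illustrative, assuming a 32-byte line size): for
	 * buf = 0x80000424 and len = 0x100, buf_cl = 0x80000420, size_cl = 4,
	 * buf_clend = 0x80000524 and size_clend = 28, i.e. the 4 bytes ahead
	 * of the buffer in its first cache line and the 28 bytes after it in
	 * its last cache line are what gets saved and restored around the
	 * invalidations below.
	 */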
1035
1036	switch (op) {
1037	case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
1038	case BUS_DMASYNC_POSTREAD:
1039
1040		/*
1041		 * Save buffers that might be modified by invalidation
1042		 */
1043		if (size_cl)
1044			memcpy (tmp_cl, (void*)buf_cl, size_cl);
1045		if (size_clend)
1046			memcpy (tmp_clend, (void*)buf_clend, size_clend);
1047		mips_dcache_inv_range(buf, len);
1048		/*
1049		 * Restore them
1050		 */
1051		if (size_cl)
1052			memcpy ((void*)buf_cl, tmp_cl, size_cl);
1053		if (size_clend)
1054			memcpy ((void*)buf_clend, tmp_clend, size_clend);
1055		/*
1056		 * Copies above have brought corresponding memory
1057		 * cache lines back into dirty state. Write them back
1058		 * out and invalidate affected cache lines again if
1059		 * necessary.
1060		 */
1061		if (size_cl)
1062			mips_dcache_wbinv_range(buf_cl, size_cl);
1063		if (size_clend && (size_cl == 0 ||
1064                    buf_clend - buf_cl > mips_pdcache_linesize))
1065			mips_dcache_wbinv_range(buf_clend, size_clend);
1066		break;
1067
1068	case BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE:
1069		mips_dcache_wbinv_range(buf_cl, len);
1070		break;
1071
1072	case BUS_DMASYNC_PREREAD:
1073		/*
1074		 * Save buffers that might be modified by invalidation
1075		 */
1076		if (size_cl)
1077			memcpy (tmp_cl, (void *)buf_cl, size_cl);
1078		if (size_clend)
1079			memcpy (tmp_clend, (void *)buf_clend, size_clend);
1080		mips_dcache_inv_range(buf, len);
1081		/*
1082		 * Restore them
1083		 */
1084		if (size_cl)
1085			memcpy ((void *)buf_cl, tmp_cl, size_cl);
1086		if (size_clend)
1087			memcpy ((void *)buf_clend, tmp_clend, size_clend);
1088		/*
1089		 * Copies above have brought corresponding memory
1090		 * cache lines back into dirty state. Write them back
1091		 * out and invalidate affected cache lines again if
1092		 * necessary.
1093		 */
1094		if (size_cl)
1095			mips_dcache_wbinv_range(buf_cl, size_cl);
1096		if (size_clend && (size_cl == 0 ||
1097                    buf_clend - buf_cl > mips_pdcache_linesize))
1098			mips_dcache_wbinv_range(buf_clend, size_clend);
1099		break;
1100
1101	case BUS_DMASYNC_PREWRITE:
1102		mips_dcache_wb_range(buf, len);
1103		break;
1104	}
1105}
1106
1107static void
1108_bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
1109{
1110	struct bounce_page *bpage;
1111
1112	STAILQ_FOREACH(bpage, &map->bpages, links) {
1113		if (op & BUS_DMASYNC_PREWRITE) {
1114			if (bpage->datavaddr != 0)
1115				bcopy((void *)bpage->datavaddr,
1116				    (void *)(bpage->vaddr_nocache != 0 ?
1117					     bpage->vaddr_nocache :
1118					     bpage->vaddr),
1119				    bpage->datacount);
1120			else
1121				physcopyout(bpage->dataaddr,
1122				    (void *)(bpage->vaddr_nocache != 0 ?
1123					     bpage->vaddr_nocache :
1124					     bpage->vaddr),
1125				    bpage->datacount);
1126			if (bpage->vaddr_nocache == 0) {
1127				mips_dcache_wb_range(bpage->vaddr,
1128				    bpage->datacount);
1129			}
1130			dmat->bounce_zone->total_bounced++;
1131		}
1132		if (op & BUS_DMASYNC_POSTREAD) {
1133			if (bpage->vaddr_nocache == 0) {
1134				mips_dcache_inv_range(bpage->vaddr,
1135				    bpage->datacount);
1136			}
1137			if (bpage->datavaddr != 0)
1138				bcopy((void *)(bpage->vaddr_nocache != 0 ?
1139				    bpage->vaddr_nocache : bpage->vaddr),
1140				    (void *)bpage->datavaddr, bpage->datacount);
1141			else
1142				physcopyin((void *)(bpage->vaddr_nocache != 0 ?
1143				    bpage->vaddr_nocache : bpage->vaddr),
1144				    bpage->dataaddr, bpage->datacount);
1145			dmat->bounce_zone->total_bounced++;
1146		}
1147	}
1148}
1149
1150void
1151_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
1152{
1153	struct sync_list *sl, *end;
1154
1155	if (op == BUS_DMASYNC_POSTWRITE)
1156		return;
1157	if (STAILQ_FIRST(&map->bpages))
1158		_bus_dmamap_sync_bp(dmat, map, op);
1159
1160	if (dmat->flags & BUS_DMA_COHERENT)
1161		return;
1162
1163	if (map->flags & DMAMAP_UNCACHEABLE)
1164		return;
1165
1166	CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
1167	if (map->sync_count) {
1168		end = &map->slist[map->sync_count];
1169		for (sl = &map->slist[0]; sl != end; sl++)
1170			bus_dmamap_sync_buf(sl->vaddr, sl->datacount, op);
1171	}
1172}
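
/*
 * Example (illustrative sketch): the usual discipline is to sync before the
 * device touches the buffer and again once the transfer has completed, then
 * unload the map.  Names are hypothetical.
 *
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_map, BUS_DMASYNC_PREREAD);
 *	(start the device-to-host transfer and wait for its completion)
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_map, BUS_DMASYNC_POSTREAD);
 *	bus_dmamap_unload(sc->sc_dmat, sc->sc_map);
 *
 * Host-to-device transfers use BUS_DMASYNC_PREWRITE/POSTWRITE instead.  In
 * the code above the PRE operations write back and/or invalidate the data
 * cache (or copy into bounce pages), while POSTREAD copies bounced data
 * back to the caller's buffer.
 */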
1173
1174static void
1175init_bounce_pages(void *dummy __unused)
1176{
1177
1178	total_bpages = 0;
1179	STAILQ_INIT(&bounce_zone_list);
1180	STAILQ_INIT(&bounce_map_waitinglist);
1181	STAILQ_INIT(&bounce_map_callbacklist);
1182	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
1183}
1184SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);
1185
1186static struct sysctl_ctx_list *
1187busdma_sysctl_tree(struct bounce_zone *bz)
1188{
1189	return (&bz->sysctl_tree);
1190}
1191
1192static struct sysctl_oid *
1193busdma_sysctl_tree_top(struct bounce_zone *bz)
1194{
1195	return (bz->sysctl_tree_top);
1196}
1197
1198static int
1199alloc_bounce_zone(bus_dma_tag_t dmat)
1200{
1201	struct bounce_zone *bz;
1202
1203	/* Check to see if we already have a suitable zone */
1204	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
1205		if ((dmat->alignment <= bz->alignment)
1206		 && (dmat->lowaddr >= bz->lowaddr)) {
1207			dmat->bounce_zone = bz;
1208			return (0);
1209		}
1210	}
1211
1212	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
1213	    M_NOWAIT | M_ZERO)) == NULL)
1214		return (ENOMEM);
1215
1216	STAILQ_INIT(&bz->bounce_page_list);
1217	bz->free_bpages = 0;
1218	bz->reserved_bpages = 0;
1219	bz->active_bpages = 0;
1220	bz->lowaddr = dmat->lowaddr;
1221	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
1222	bz->map_count = 0;
1223	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
1224	busdma_zonecount++;
1225	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
1226	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
1227	dmat->bounce_zone = bz;
1228
1229	sysctl_ctx_init(&bz->sysctl_tree);
1230	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
1231	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
1232	    CTLFLAG_RD, 0, "");
1233	if (bz->sysctl_tree_top == NULL) {
1234		sysctl_ctx_free(&bz->sysctl_tree);
1235		return (0);	/* XXX error code? */
1236	}
1237
1238	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1239	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1240	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
1241	    "Total bounce pages");
1242	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1243	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1244	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
1245	    "Free bounce pages");
1246	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1247	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1248	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
1249	    "Reserved bounce pages");
1250	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1251	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1252	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
1253	    "Active bounce pages");
1254	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1255	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1256	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
1257	    "Total bounce requests");
1258	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1259	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1260	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
1261	    "Total bounce requests that were deferred");
1262	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
1263	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1264	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
1265	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1266	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1267	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");
1268
1269	return (0);
1270}
1271
1272static int
1273alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
1274{
1275	struct bounce_zone *bz;
1276	int count;
1277
1278	bz = dmat->bounce_zone;
1279	count = 0;
1280	while (numpages > 0) {
1281		struct bounce_page *bpage;
1282
1283		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
1284						     M_NOWAIT | M_ZERO);
1285
1286		if (bpage == NULL)
1287			break;
1288		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
1289							 M_NOWAIT, 0ul,
1290							 bz->lowaddr,
1291							 PAGE_SIZE,
1292							 0);
1293		if (bpage->vaddr == 0) {
1294			free(bpage, M_DEVBUF);
1295			break;
1296		}
1297		bpage->busaddr = pmap_kextract(bpage->vaddr);
1298		bpage->vaddr_nocache =
1299		    (vm_offset_t)pmap_mapdev(bpage->busaddr, PAGE_SIZE);
1300		mtx_lock(&bounce_lock);
1301		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
1302		total_bpages++;
1303		bz->total_bpages++;
1304		bz->free_bpages++;
1305		mtx_unlock(&bounce_lock);
1306		count++;
1307		numpages--;
1308	}
1309	return (count);
1310}
1311
1312static int
1313reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
1314{
1315	struct bounce_zone *bz;
1316	int pages;
1317
1318	mtx_assert(&bounce_lock, MA_OWNED);
1319	bz = dmat->bounce_zone;
1320	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
1321	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
1322		return (map->pagesneeded - (map->pagesreserved + pages));
1323	bz->free_bpages -= pages;
1324	bz->reserved_bpages += pages;
1325	map->pagesreserved += pages;
1326	pages = map->pagesneeded - map->pagesreserved;
1327
1328	return (pages);
1329}
1330
1331static bus_addr_t
1332add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
1333		bus_addr_t addr, bus_size_t size)
1334{
1335	struct bounce_zone *bz;
1336	struct bounce_page *bpage;
1337
1338	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
1339	KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));
1340
1341	bz = dmat->bounce_zone;
1342	if (map->pagesneeded == 0)
1343		panic("add_bounce_page: map doesn't need any pages");
1344	map->pagesneeded--;
1345
1346	if (map->pagesreserved == 0)
1347		panic("add_bounce_page: map doesn't need any pages");
1348	map->pagesreserved--;
1349
1350	mtx_lock(&bounce_lock);
1351	bpage = STAILQ_FIRST(&bz->bounce_page_list);
1352	if (bpage == NULL)
1353		panic("add_bounce_page: free page list is empty");
1354
1355	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
1356	bz->reserved_bpages--;
1357	bz->active_bpages++;
1358	mtx_unlock(&bounce_lock);
1359
1360	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
1361		/* Page offset needs to be preserved. */
1362		bpage->vaddr |= vaddr & PAGE_MASK;
1363		bpage->busaddr |= vaddr & PAGE_MASK;
1364	}
1365	bpage->datavaddr = vaddr;
1366	bpage->dataaddr = addr;
1367	bpage->datacount = size;
1368	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
1369	return (bpage->busaddr);
1370}
1371
1372static void
1373free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
1374{
1375	struct bus_dmamap *map;
1376	struct bounce_zone *bz;
1377
1378	bz = dmat->bounce_zone;
1379	bpage->datavaddr = 0;
1380	bpage->datacount = 0;
1381	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
1382		/*
1383		 * Reset the bounce page to start at offset 0.  Other uses
1384		 * of this bounce page may need to store a full page of
1385		 * data and/or assume it starts on a page boundary.
1386		 */
1387		bpage->vaddr &= ~PAGE_MASK;
1388		bpage->busaddr &= ~PAGE_MASK;
1389	}
1390
1391	mtx_lock(&bounce_lock);
1392	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
1393	bz->free_bpages++;
1394	bz->active_bpages--;
1395	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
1396		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
1397			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
1398			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
1399					   map, links);
1400			busdma_swi_pending = 1;
1401			bz->total_deferred++;
1402			swi_sched(vm_ih, 0);
1403		}
1404	}
1405	mtx_unlock(&bounce_lock);
1406}
1407
1408void
1409busdma_swi(void)
1410{
1411	bus_dma_tag_t dmat;
1412	struct bus_dmamap *map;
1413
1414	mtx_lock(&bounce_lock);
1415	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
1416		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
1417		mtx_unlock(&bounce_lock);
1418		dmat = map->dmat;
1419		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
1420		bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback,
1421		    map->callback_arg, BUS_DMA_WAITOK);
1422		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
1423		mtx_lock(&bounce_lock);
1424	}
1425	mtx_unlock(&bounce_lock);
1426}
1427