busdma_machdep.c revision 112196
/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/i386/i386/busdma_machdep.c 112196 2003-03-13 17:18:48Z mux $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX_BPAGES 128

struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  alignment;
	bus_size_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	u_int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;
	int		  map_count;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

struct bus_dmamap {
	struct bp_list	       bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	void		      *buf;		/* unmapped buffer pointer */
	bus_size_t	       buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map);
static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/*
 * Return true if a match is made.
 *
 * To find a match, walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag, call the filter callback
 * to check for a match; if there is no filter callback, assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		if (paddr > dmat->lowaddr
		 && paddr <= dmat->highaddr
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}
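
/*
 * Illustrative sketch, not part of the original file: a driver-supplied
 * bus_dma_filter_t refines the decision made by the lowaddr/highaddr
 * window above.  It is only consulted for addresses inside that window;
 * returning non-zero means the page must still be bounced.  The device
 * and the address hole below are hypothetical.
 */
#if 0
static int
example_dma_filter(void *arg, bus_addr_t paddr)
{

	/* Hypothetical device that cannot address the 960KB-1MB alias hole. */
	if (paddr >= 0xF0000 && paddr < 0x100000)
		return (1);	/* bounce pages in the hole */
	return (0);		/* any other address is usable as-is */
}
#endif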

#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourselves */
	newtag->map_count = 0;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		/*
		 * XXX Not really correct??? Probably need to honor boundary
		 *     all the way up the inheritance chain.
		 */
		newtag->boundary = MAX(parent->boundary, newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL) {
			parent->ref_count++;
		}
	}

	if (newtag->lowaddr < ptoa(Maxmem) && (flags & BUS_DMA_ALLOCNOW) != 0) {
		/* Must bounce */

		if (lowaddr > bounce_lowaddr) {
			/*
			 * Go through the pool and kill any pages
			 * that don't reside below lowaddr.
			 */
			panic("bus_dma_tag_create: page reallocation "
			      "not implemented");
		}
		if (ptoa(total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}
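
/*
 * Illustrative sketch, not part of the original file: a typical consumer
 * describes its device's addressing limits with a tag and lets this code
 * decide whether bounce pages are needed.  The softc layout and sizes
 * below are hypothetical.
 */
#if 0
static int
example_create_tag(struct example_softc *sc)
{

	/* 24-bit ISA-style device: pages above 16MB will be bounced. */
	return (bus_dma_tag_create(/*parent*/NULL, /*alignment*/1,
				   /*boundary*/0,
				   /*lowaddr*/BUS_SPACE_MAXADDR_24BIT,
				   /*highaddr*/BUS_SPACE_MAXADDR,
				   /*filter*/NULL, /*filterarg*/NULL,
				   /*maxsize*/65536, /*nsegments*/1,
				   /*maxsegsz*/65536, /*flags*/BUS_DMA_ALLOCNOW,
				   &sc->example_tag));
}
#endif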

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			dmat->ref_count--;
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->lowaddr < ptoa(Maxmem)) {
		/* Must bounce */
		int maxpages;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT | M_ZERO);
		if (*mapp == NULL)
			return (ENOMEM);

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0
		  && total_bpages < maxpages)) {
			int pages;

			if (dmat->lowaddr > bounce_lowaddr) {
				/*
				 * Go through the pool and kill any pages
				 * that don't reside below lowaddr.
				 */
				panic("bus_dmamap_create: page reallocation "
				      "not implemented");
			}
			pages = atop(dmat->maxsize);
			pages = MIN(maxpages - total_bpages, pages);
			error = alloc_bounce_pages(dmat, pages);

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}


/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc_size(bus_dma_tag_t dmat, void** vaddr, int flags,
		      bus_dmamap_t *mapp, bus_size_t size)
{

	if (size > dmat->maxsize)
		return (ENOMEM);

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if ((size <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem)) {
		*vaddr = malloc(size, M_DEVBUF,
				(flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		mtx_lock(&Giant);
		*vaddr = contigmalloc(size, M_DEVBUF,
		    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK,
		    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
		    dmat->boundary);
		mtx_unlock(&Giant);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}

int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	return (bus_dmamem_alloc_size(dmat, vaddr, flags, mapp, dmat->maxsize));
}
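
/*
 * Illustrative sketch, not part of the original file: memory from
 * bus_dmamem_alloc() already satisfies the tag's constraints, so the map
 * returned here (NULL on this platform) is simply handed back to
 * bus_dmamem_free() later.  The function and softc names are hypothetical.
 */
#if 0
static int
example_alloc_ring(struct example_softc *sc)
{
	void *vaddr;
	bus_dmamap_t map;
	int error;

	error = bus_dmamem_alloc(sc->example_tag, &vaddr, BUS_DMA_NOWAIT,
				 &map);
	if (error != 0)
		return (error);
	/* ... program the device; on detach, release the pair: ... */
	bus_dmamem_free(sc->example_tag, vaddr, map);
	return (0);
}
#endif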

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free_size(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map,
		     bus_size_t size)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if ((size <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem))
		free(vaddr, M_DEVBUF);
	else {
		mtx_lock(&Giant);
		contigfree(vaddr, size, M_DEVBUF);
		mtx_unlock(&Giant);
	}
}

void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	bus_dmamem_free_size(dmat, vaddr, map, dmat->maxsize);
}

#define BUS_DMAMAP_NSEGS ((64 * 1024) / PAGE_SIZE + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	vm_offset_t		vaddr;
	vm_offset_t		paddr;
#ifdef __GNUC__
	bus_dma_segment_t	dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t	dm_segments[BUS_DMAMAP_NSEGS];
#endif
	bus_dma_segment_t      *sg;
	int			seg;
	int			error;
	vm_offset_t		nextpaddr;

	if (map == NULL)
		map = &nobounce_dmamap;

	error = 0;
	/*
	 * If we are being called during a callback, pagesneeded will
	 * be non-zero, so we can avoid doing the work twice.
	 */
	if (dmat->lowaddr < ptoa(Maxmem) && map->pagesneeded == 0) {
		vm_offset_t	vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0) {

				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		int s;

		s = splhigh();
		if (reserve_bounce_pages(dmat, map) != 0) {

			/* Queue us for resources */
			map->dmat = dmat;
			map->buf = buf;
			map->buflen = buflen;
			map->callback = callback;
			map->callback_arg = callback_arg;

			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
			splx(s);

			return (EINPROGRESS);
		}
		splx(s);
	}

	vaddr = (vm_offset_t)buf;
	sg = &dm_segments[0];
	seg = 1;
	sg->ds_len = 0;

	nextpaddr = 0;
	do {
		bus_size_t	size;

		paddr = pmap_kextract(vaddr);
		size = PAGE_SIZE - (paddr & PAGE_MASK);
		if (size > buflen)
			size = buflen;

		if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
			paddr = add_bounce_page(dmat, map, vaddr, size);
		}

		if (sg->ds_len == 0) {
			sg->ds_addr = paddr;
			sg->ds_len = size;
		} else if (paddr == nextpaddr) {
			sg->ds_len += size;
		} else {
			/* Go to the next segment */
			sg++;
			seg++;
			if (seg > dmat->nsegments)
				break;
			sg->ds_addr = paddr;
			sg->ds_len = size;
		}
		vaddr += size;
		nextpaddr = paddr + size;
		buflen -= size;

	} while (buflen > 0);

	if (buflen != 0) {
		printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
		       (u_long)buflen);
		error = EFBIG;
	}

	(*callback)(callback_arg, dm_segments, seg, error);

	return (0);
}
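
/*
 * Illustrative sketch, not part of the original file: the callback handed
 * to bus_dmamap_load() receives the finished segment list, which is where
 * a driver typically records the bus address of its buffer.  An
 * EINPROGRESS return above means the callback will instead run later from
 * busdma_swi() once bounce pages free up.  The names below are
 * hypothetical.
 */
#if 0
static void
example_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *busaddrp = arg;

	if (error != 0)
		return;
	/* This caller created its tag with nsegments == 1. */
	*busaddrp = segs[0].ds_addr;
}
#endif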

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dma_segment_t segs[],
			void *buf, bus_size_t buflen,
			struct thread *td,
			int flags,
			vm_offset_t *lastaddrp,
			int *segp,
			int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	pmap_t pmap;

	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = NULL;

	lastaddr = *lastaddrp;
	bmask  = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, error;

	KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL,
		("bus_dmamap_load_mbuf: No support for bounce pages!"));
	KASSERT(m0->m_flags & M_PKTHDR,
		("bus_dmamap_load_mbuf: no packet header"));

	nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat,
						dm_segments,
						m->m_data, m->m_len,
						NULL, flags, &lastaddr,
						&nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments,
			    nsegs+1, m0->m_pkthdr.len, error);
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	vm_offset_t lastaddr;
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	struct thread *td = NULL;

	KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL,
		("bus_dmamap_load_uio: No support for bounce pages!"));

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		td = uio->uio_td;
		KASSERT(td != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
	}

	nsegs = 0;
	error = 0;
	first = 1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat,
					dm_segments,
					addr, minlen,
					td, flags, &lastaddr, &nsegs, first);
			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments,
			    nsegs+1, uio->uio_resid, error);
	}
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {

		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		switch (op) {
		case BUS_DMASYNC_PREWRITE:
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			break;

		case BUS_DMASYNC_POSTREAD:
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			break;
		case BUS_DMASYNC_PREREAD:
		case BUS_DMASYNC_POSTWRITE:
			/* No-ops */
			break;
		}
	}
}
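
/*
 * Illustrative sketch, not part of the original file: when bounce pages
 * are in use, the sync operations above are what actually copy data
 * between the driver's buffer and the bounce buffers, so a driver must
 * bracket every transfer with them.  The tag and map names are
 * hypothetical.
 */
#if 0
	/* Before the device reads (DMAs out of) host memory. */
	bus_dmamap_sync(sc->example_tag, sc->example_map, BUS_DMASYNC_PREWRITE);
	/* ... start the transfer and wait for the completion interrupt ... */
	/* After the device has written (DMA'd into) host memory. */
	bus_dmamap_sync(sc->example_tag, sc->example_map, BUS_DMASYNC_POSTREAD);
#endif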

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	int count;

	count = 0;
	if (total_bpages == 0) {
		STAILQ_INIT(&bounce_page_list);
		STAILQ_INIT(&bounce_map_waitinglist);
		STAILQ_INIT(&bounce_map_callbacklist);
	}

	while (numpages > 0) {
		struct bounce_page *bpage;
		int s;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		mtx_lock(&Giant);
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 dmat->lowaddr,
							 PAGE_SIZE,
							 0);
		mtx_unlock(&Giant);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		s = splhigh();
		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
		total_bpages++;
		free_bpages++;
		splx(s);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	int pages;

	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
	free_bpages -= pages;
	reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static vm_offset_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	int s;
	struct bounce_page *bpage;

	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	s = splhigh();
	bpage = STAILQ_FIRST(&bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
	reserved_bpages--;
	active_bpages++;
	splx(s);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	int s;
	struct bus_dmamap *map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	s = splhigh();
	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			swi_sched(vm_ih, 0);
		}
	}
	splx(s);
}

void
busdma_swi(void)
{
	int s;
	struct bus_dmamap *map;

	s = splhigh();
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		splx(s);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		s = splhigh();
	}
	splx(s);
}
910