/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/i386/i386/busdma_machdep.c 110232 2003-02-02 13:17:30Z alfred $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX_BPAGES 128

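/*
 * A dma tag records the DMA constraints (address range, alignment,
 * boundary, and segment count/size limits) imposed by a device or by a
 * parent bus, along with the bookkeeping for maps created from it.
 */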
struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  alignment;
	bus_size_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	u_int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;
	int		  map_count;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

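/*
 * A dmamap tracks the bounce pages currently assigned to one mapping and,
 * when a load has to be deferred for lack of bounce pages, the state
 * needed to retry the load from busdma_swi().
 */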
struct bus_dmamap {
	struct bp_list	       bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	void		      *buf;		/* unmapped buffer pointer */
	bus_size_t	       buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map);
static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/*
 * Return true if a match is made.
 *
 * To find a match, walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag, call the filter callback
 * to check for a match; if there is no filter callback, assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		if (paddr > dmat->lowaddr
		 && paddr <= dmat->highaddr
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourselves */
	newtag->map_count = 0;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		/*
		 * XXX Not really correct??? Probably need to honor boundary
		 *     all the way up the inheritance chain.
		 */
		newtag->boundary = MAX(parent->boundary, newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL) {
			parent->ref_count++;
		}
	}

	if (newtag->lowaddr < ptoa(Maxmem) && (flags & BUS_DMA_ALLOCNOW) != 0) {
		/* Must bounce */

		if (lowaddr > bounce_lowaddr) {
			/*
			 * Go through the pool and kill any pages
			 * that don't reside below lowaddr.
			 */
			panic("bus_dma_tag_create: page reallocation "
			      "not implemented");
		}
		if (ptoa(total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}

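/*
 * Release a reference on a dma tag.  A tag may not be destroyed while it
 * still has maps outstanding.  When the last reference on a tag is
 * dropped, the reference it holds on its parent is released as well,
 * walking up the chain of parents.
 */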
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			dmat->ref_count--;
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * That was the last reference, so
				 * release our reference on our
				 * parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->lowaddr < ptoa(Maxmem)) {
		/* Must bounce */
		int maxpages;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT | M_ZERO);
		if (*mapp == NULL)
			return (ENOMEM);

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0
		  && total_bpages < maxpages)) {
			int pages;

			if (dmat->lowaddr > bounce_lowaddr) {
				/*
				 * Go through the pool and kill any pages
				 * that don't reside below lowaddr.
				 */
				panic("bus_dmamap_create: page reallocation "
				      "not implemented");
			}
			pages = atop(dmat->maxsize);
			pages = MIN(maxpages - total_bpages, pages);
			error = alloc_bounce_pages(dmat, pages);

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}


/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc_size(bus_dma_tag_t dmat, void** vaddr, int flags,
		      bus_dmamap_t *mapp, bus_size_t size)
{

	if (size > dmat->maxsize)
		return (ENOMEM);

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if ((size <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem)) {
		*vaddr = malloc(size, M_DEVBUF,
				(flags & BUS_DMA_NOWAIT) ? M_NOWAIT : 0);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(size, M_DEVBUF,
		    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : 0,
		    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}

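/*
 * Convenience wrapper around bus_dmamem_alloc_size() that allocates the
 * tag's full maxsize.
 */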
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	return (bus_dmamem_alloc_size(dmat, vaddr, flags, mapp, dmat->maxsize));
}

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free_size(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map,
		     bus_size_t size)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if ((size <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem))
		free(vaddr, M_DEVBUF);
	else
		contigfree(vaddr, size, M_DEVBUF);
}

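/*
 * Convenience wrapper around bus_dmamem_free_size() for memory allocated
 * with the tag's full maxsize.
 */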
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	bus_dmamem_free_size(dmat, vaddr, map, dmat->maxsize);
}

#define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	vm_offset_t		vaddr;
	vm_offset_t		paddr;
#ifdef __GNUC__
	bus_dma_segment_t	dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t	dm_segments[BUS_DMAMAP_NSEGS];
#endif
	bus_dma_segment_t      *sg;
	int			seg;
	int			error;
	vm_offset_t		nextpaddr;

	if (map == NULL)
		map = &nobounce_dmamap;

	error = 0;
	/*
	 * If we are being called during a callback, pagesneeded will
	 * be non-zero, so we can avoid doing the work twice.
	 */
	if (dmat->lowaddr < ptoa(Maxmem) && map->pagesneeded == 0) {
		vm_offset_t	vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0) {

				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		int s;

		s = splhigh();
		if (reserve_bounce_pages(dmat, map) != 0) {

			/* Queue us for resources */
			map->dmat = dmat;
			map->buf = buf;
			map->buflen = buflen;
			map->callback = callback;
			map->callback_arg = callback_arg;

			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
			splx(s);

			return (EINPROGRESS);
		}
		splx(s);
	}

	vaddr = (vm_offset_t)buf;
	sg = &dm_segments[0];
	seg = 1;
	sg->ds_len = 0;

	nextpaddr = 0;
	do {
		bus_size_t	size;

		paddr = pmap_kextract(vaddr);
		size = PAGE_SIZE - (paddr & PAGE_MASK);
		if (size > buflen)
			size = buflen;

		if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
			paddr = add_bounce_page(dmat, map, vaddr, size);
		}

		if (sg->ds_len == 0) {
			sg->ds_addr = paddr;
			sg->ds_len = size;
		} else if (paddr == nextpaddr) {
			sg->ds_len += size;
		} else {
			/* Go to the next segment */
			sg++;
			seg++;
			if (seg > dmat->nsegments)
				break;
			sg->ds_addr = paddr;
			sg->ds_len = size;
		}
		vaddr += size;
		nextpaddr = paddr + size;
		buflen -= size;

	} while (buflen > 0);

	if (buflen != 0) {
		printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
		       (u_long)buflen);
		error = EFBIG;
	}

	(*callback)(callback_arg, dm_segments, seg, error);

	return (0);
}
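
/*
 * Illustrative sketch (not part of this file): a typical single-buffer
 * use of the interface above by a hypothetical driver.  The names
 * foo_softc, foo_dma_callback, and the constraint values are made up for
 * illustration; the BUS_SPACE_* / BUS_DMA_* constants and the
 * bus_dmamap_sync()/bus_dmamap_unload() wrappers come from
 * <machine/bus.h>.
 *
 *	static void
 *	foo_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg,
 *	    int error)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		if (error != 0)
 *			return;
 *		// Program the device with segs[0].ds_addr / ds_len.
 *		sc->sc_busaddr = segs[0].ds_addr;
 *	}
 *
 *	// Tag limited to 32-bit addresses, one segment of up to a page.
 *	error = bus_dma_tag_create(NULL, 1, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
 *	    NULL, NULL, PAGE_SIZE, 1, PAGE_SIZE, 0, &sc->sc_dmat);
 *	error = bus_dmamap_create(sc->sc_dmat, 0, &sc->sc_dmamap);
 *	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, sc->sc_buf,
 *	    PAGE_SIZE, foo_dma_callback, sc, 0);
 *	// ... later, after the device-to-host DMA completes:
 *	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, BUS_DMASYNC_POSTREAD);
 *	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
 */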

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dma_segment_t segs[],
			void *buf, bus_size_t buflen,
			struct thread *td,
			int flags,
			vm_offset_t *lastaddrp,
			int *segp,
			int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	pmap_t pmap;

	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = NULL;

	lastaddr = *lastaddrp;
	bmask  = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, error;

	KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL,
		("bus_dmamap_load_mbuf: No support for bounce pages!"));
	KASSERT(m0->m_flags & M_PKTHDR,
		("bus_dmamap_load_mbuf: no packet header"));

	nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			error = _bus_dmamap_load_buffer(dmat,
					dm_segments,
					m->m_data, m->m_len,
					NULL, flags, &lastaddr, &nsegs, first);
			first = 0;
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments,
			    nsegs+1, m0->m_pkthdr.len, error);
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	vm_offset_t lastaddr = 0;
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	struct thread *td = NULL;

	KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL,
		("bus_dmamap_load_uio: No support for bounce pages!"));

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		td = uio->uio_td;
		KASSERT(td != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
	}

	nsegs = 0;
	error = 0;
	first = 1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		error = _bus_dmamap_load_buffer(dmat,
				dm_segments,
				addr, minlen,
				td, flags, &lastaddr, &nsegs, first);
		first = 0;

		resid -= minlen;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments,
			    nsegs+1, uio->uio_resid, error);
	}
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

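/*
 * Synchronize a mapping that uses bounce pages: copy client data into the
 * bounce buffers before a write to the device and back out after a read
 * from the device.  Maps with no bounce pages require no work here.
 */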
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {

		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		switch (op) {
		case BUS_DMASYNC_PREWRITE:
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			break;

		case BUS_DMASYNC_POSTREAD:
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			break;
		case BUS_DMASYNC_PREREAD:
		case BUS_DMASYNC_POSTWRITE:
			/* No-ops */
			break;
		}
	}
}

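/*
 * Grow the bounce page pool by up to 'numpages' pages that satisfy the
 * tag's lowaddr constraint.  Returns the number of pages actually added.
 */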
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	int count;

	count = 0;
	if (total_bpages == 0) {
		STAILQ_INIT(&bounce_page_list);
		STAILQ_INIT(&bounce_map_waitinglist);
		STAILQ_INIT(&bounce_map_callbacklist);
	}

	while (numpages > 0) {
		struct bounce_page *bpage;
		int s;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 dmat->lowaddr,
							 PAGE_SIZE,
							 0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		s = splhigh();
		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
		total_bpages++;
		free_bpages++;
		splx(s);
		count++;
		numpages--;
	}
	return (count);
}

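/*
 * Reserve as many free bounce pages as the map still needs.  Returns the
 * number of pages the map is still short; a non-zero return means the
 * load must be deferred until pages are freed.
 */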
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	int pages;

	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
	free_bpages -= pages;
	reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

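/*
 * Take a reserved bounce page off the free list, associate it with the
 * given client address range, and queue it on the map.  Returns the bus
 * address the caller should use in place of the original page.
 */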
static vm_offset_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	int s;
	struct bounce_page *bpage;

	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have any pages reserved");
	map->pagesreserved--;

	s = splhigh();
	bpage = STAILQ_FIRST(&bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
	reserved_bpages--;
	active_bpages++;
	splx(s);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

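/*
 * Return a bounce page to the free list.  If a deferred map is waiting
 * for pages and its reservation can now be satisfied, move it to the
 * callback list and schedule the busdma software interrupt.
 */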
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	int s;
	struct bus_dmamap *map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	s = splhigh();
	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			swi_sched(vm_ih, 0);
		}
	}
	splx(s);
}

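/*
 * Software interrupt handler for deferred loads: retry bus_dmamap_load()
 * for each map whose bounce page reservation has been satisfied.
 */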
void
busdma_swi(void)
{
	int s;
	struct bus_dmamap *map;

	s = splhigh();
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		splx(s);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		s = splhigh();
	}
	splx(s);
}