/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/i386/i386/busdma_machdep.c 104486 2002-10-04 20:40:39Z sam $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX(a,b) (((a) > (b)) ? (a) : (b))
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
#define MAX_BPAGES 128

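/*
 * A dma tag records the DMA restrictions of a device: the address
 * window it can reach (lowaddr/highaddr plus an optional filter
 * callback), alignment and boundary constraints, and limits on the
 * total transfer size and on scatter/gather segments.  A tag inherits
 * any tighter restrictions from its parent at creation time.
 */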
struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  alignment;
	bus_size_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	u_int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;
	int		  map_count;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

struct bus_dmamap {
	struct bp_list	       bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	void		      *buf;		/* unmapped buffer pointer */
	bus_size_t	       buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map);
static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/*
 * Return true if a match is made.
 *
 * To find a match, walk the chain of bus_dma_tag_t's checking 'paddr'
 * against each tag's exclusion window.
 *
 * If paddr falls within the bounds of a dma tag, call its filter
 * callback to check for a match; if there is no filter callback,
 * assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		if (paddr > dmat->lowaddr
		 && paddr <= dmat->highaddr
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}
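
/*
 * Example (illustrative only): for a tag created with
 * lowaddr = BUS_SPACE_MAXADDR_24BIT (the 16MB ISA limit),
 * highaddr = BUS_SPACE_MAXADDR, and no filter callback:
 *
 *	run_filter(tag, 0x00ffe000);	0: page is reachable, no bounce
 *	run_filter(tag, 0x01000000);	1: page must be bounced
 */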

#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		/*
		 * XXX Not really correct??? Probably need to honor boundary
		 *     all the way up the inheritance chain.
		 */
		newtag->boundary = MAX(parent->boundary, newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information.
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL) {
			parent->ref_count++;
		}
	}

	if (newtag->lowaddr < ptoa(Maxmem) && (flags & BUS_DMA_ALLOCNOW) != 0) {
		/* Must bounce */

		if (lowaddr > bounce_lowaddr) {
			/*
			 * Go through the pool and kill any pages
			 * that don't reside below lowaddr.
			 */
			panic("bus_dma_tag_create: page reallocation "
			      "not implemented");
		}
		if (ptoa(total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}
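
/*
 * Example (illustrative sketch, not part of this file): a driver for a
 * hypothetical device that can address only 32 bits and take at most 16
 * scatter/gather segments might create its tag as follows; "sc" and its
 * fields are assumptions of the sketch.
 *
 *	error = bus_dma_tag_create(NULL,	parent
 *		    1, 0,			alignment, boundary
 *		    BUS_SPACE_MAXADDR_32BIT,	lowaddr
 *		    BUS_SPACE_MAXADDR,		highaddr
 *		    NULL, NULL,			filter, filterarg
 *		    MAXBSIZE, 16,		maxsize, nsegments
 *		    BUS_SPACE_MAXSIZE_32BIT,	maxsegsz
 *		    0,				flags
 *		    &sc->buffer_tag);
 *	if (error != 0)
 *		return (error);
 */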

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {
		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			dmat->ref_count--;
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference dropped; release our
				 * reference on our parent and loop to
				 * see if it can be freed too.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->lowaddr < ptoa(Maxmem)) {
		/* Must bounce */
		int maxpages;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT | M_ZERO);
		if (*mapp == NULL)
			return (ENOMEM);

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0
		  && total_bpages < maxpages)) {
			int pages;

			if (dmat->lowaddr > bounce_lowaddr) {
				/*
				 * Go through the pool and kill any pages
				 * that don't reside below lowaddr.
				 */
				panic("bus_dmamap_create: page reallocation "
				      "not implemented");
			}
			pages = atop(dmat->maxsize);
			pages = MIN(maxpages - total_bpages, pages);
			error = alloc_bounce_pages(dmat, pages);

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	return (error);
}
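
/*
 * Example (illustrative sketch; "sc" and its fields are assumptions):
 * a driver typically creates one map per in-flight buffer.  For a tag
 * that can address all of physical memory the call is cheap and *mapp
 * is simply set to NULL:
 *
 *	error = bus_dmamap_create(sc->buffer_tag, 0, &sc->buffer_map);
 *	if (error != 0)
 *		return (error);
 */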

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}


/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF,
				(flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
	} else {
		/*
		 * XXX Use contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF,
		    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK,
		    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}
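
/*
 * Example (illustrative sketch; the names are assumptions): allocating
 * a descriptor ring the device can reach directly.
 *
 *	error = bus_dmamem_alloc(sc->ring_tag, (void **)&sc->ring,
 *	    BUS_DMA_NOWAIT, &sc->ring_map);
 *
 * On success sc->ring_map is NULL, since memory satisfying the tag's
 * constraints never needs to be bounced (see bus_dmamem_free()).
 */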

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL.
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem))
		free(vaddr, M_DEVBUF);
	else
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
}

#define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	vm_offset_t		vaddr;
	vm_offset_t		paddr;
#ifdef __GNUC__
	bus_dma_segment_t	dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t	dm_segments[BUS_DMAMAP_NSEGS];
#endif
	bus_dma_segment_t      *sg;
	int			seg;
	int			error;
	vm_offset_t		nextpaddr;

	if (map == NULL)
		map = &nobounce_dmamap;

	error = 0;
	/*
	 * If we are being called during a callback, pagesneeded will
	 * be non-zero, so we can avoid doing the work twice.
	 */
	if (dmat->lowaddr < ptoa(Maxmem) && map->pagesneeded == 0) {
		vm_offset_t	vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer.
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0)
				map->pagesneeded++;
			vaddr += PAGE_SIZE;
		}
	}

	/* Reserve the necessary bounce pages. */
	if (map->pagesneeded != 0) {
		int s;

		s = splhigh();
		if (reserve_bounce_pages(dmat, map) != 0) {
			/* Queue us for resources */
			map->dmat = dmat;
			map->buf = buf;
			map->buflen = buflen;
			map->callback = callback;
			map->callback_arg = callback_arg;

			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
			splx(s);

			return (EINPROGRESS);
		}
		splx(s);
	}

	vaddr = (vm_offset_t)buf;
	sg = &dm_segments[0];
	seg = 1;
	sg->ds_len = 0;

	nextpaddr = 0;
	do {
		bus_size_t	size;

		paddr = pmap_kextract(vaddr);
		size = PAGE_SIZE - (paddr & PAGE_MASK);
		if (size > buflen)
			size = buflen;

		if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
			paddr = add_bounce_page(dmat, map, vaddr, size);
		}

		if (sg->ds_len == 0) {
			sg->ds_addr = paddr;
			sg->ds_len = size;
		} else if (paddr == nextpaddr) {
			sg->ds_len += size;
		} else {
			/* Go to the next segment */
			sg++;
			seg++;
			if (seg > dmat->nsegments)
				break;
			sg->ds_addr = paddr;
			sg->ds_len = size;
		}
		vaddr += size;
		nextpaddr = paddr + size;
		buflen -= size;

	} while (buflen > 0);

	if (buflen != 0) {
		printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
		       (u_long)buflen);
		error = EFBIG;
	}

	(*callback)(callback_arg, dm_segments, seg, error);

	return (0);
}
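
/*
 * Example (illustrative sketch; "struct softc" and its fields are
 * assumptions): the callback receives the finished segment list, and a
 * common driver pattern is to record the bus addresses from it.  Note
 * that the load may complete asynchronously: if bounce pages are
 * exhausted the map is queued, EINPROGRESS is returned, and the
 * callback runs later from busdma_swi().
 *
 *	static void
 *	dma_map_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		struct softc *sc = arg;
 *
 *		if (error != 0)
 *			return;
 *		sc->buf_busaddr = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_load(sc->buffer_tag, sc->buffer_map, sc->buf,
 *	    sc->buflen, dma_map_cb, sc, 0);
 */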

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dma_segment_t segs[],
			void *buf, bus_size_t buflen,
			struct thread *td,
			int flags,
			vm_offset_t *lastaddrp,
			int *segp,
			int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	pmap_t pmap;

	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = NULL;

	lastaddr = *lastaddrp;
	bmask  = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
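
/*
 * Worked example of the boundary clipping above (illustrative numbers):
 * with dmat->boundary = 0x400 (1KB) and curaddr = 0x3900, the page-based
 * sgsize would be 0x700, but
 *
 *	bmask = ~(0x400 - 1)             = 0xfffffc00
 *	baddr = (0x3900 + 0x400) & bmask = 0x3c00
 *
 * clips sgsize to baddr - curaddr = 0x300, so the segment ends exactly
 * at the 1KB boundary at 0x3c00.
 */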

/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, error;

	KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL,
		("bus_dmamap_load_mbuf: No support for bounce pages!"));
	KASSERT(m0->m_flags & M_PKTHDR,
		("bus_dmamap_load_mbuf: no packet header"));

	nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			error = _bus_dmamap_load_buffer(dmat,
					dm_segments,
					m->m_data, m->m_len,
					NULL, flags, &lastaddr, &nsegs, first);
			first = 0;
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments,
			    nsegs+1, m0->m_pkthdr.len, error);
	}
	return (error);
}
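
/*
 * Example (illustrative sketch; the names are assumptions): a network
 * driver maps an outgoing packet chain from its start routine.  The
 * callback2 form also receives the total mapped length.  Bounce pages
 * are not supported by this path (see the KASSERT above), so with a
 * NULL map the tag must be able to address all of physical memory.
 *
 *	error = bus_dmamap_load_mbuf(sc->tx_tag, NULL, m_head,
 *	    dma_tx_cb, sc, 0);
 */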

/*
 * Like bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	vm_offset_t lastaddr = 0;
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	struct thread *td = NULL;

	KASSERT(dmat->lowaddr >= ptoa(Maxmem) || map != NULL,
		("bus_dmamap_load_uio: No support for bounce pages!"));

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		td = uio->uio_td;
		KASSERT(td != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
	}

	nsegs = 0;
	error = 0;
	first = 1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		error = _bus_dmamap_load_buffer(dmat,
				dm_segments,
				addr, minlen,
				td, flags, &lastaddr, &nsegs, first);
		first = 0;

		resid -= minlen;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments,
			    nsegs+1, uio->uio_resid, error);
	}
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware.
		 */
		switch (op) {
		case BUS_DMASYNC_PREWRITE:
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			break;

		case BUS_DMASYNC_POSTREAD:
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			break;
		case BUS_DMASYNC_PREREAD:
		case BUS_DMASYNC_POSTWRITE:
			/* No-ops */
			break;
		}
	}
}
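
/*
 * Example (illustrative; sc->tag and sc->map are assumptions): the sync
 * discipline for a bounced transfer.  PREWRITE copies the client data
 * into the bounce pages before the device reads memory; POSTREAD copies
 * device-written data back out of the bounce pages afterwards:
 *
 *	bus_dmamap_sync(sc->tag, sc->map, BUS_DMASYNC_PREWRITE);
 *	... start the DMA and wait for the completion interrupt ...
 *	bus_dmamap_sync(sc->tag, sc->map, BUS_DMASYNC_POSTREAD);
 *
 * With no bounce pages on the map, the operations above are no-ops.
 */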

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	int count;

	count = 0;
	if (total_bpages == 0) {
		STAILQ_INIT(&bounce_page_list);
		STAILQ_INIT(&bounce_map_waitinglist);
		STAILQ_INIT(&bounce_map_callbacklist);
	}

	while (numpages > 0) {
		struct bounce_page *bpage;
		int s;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 dmat->lowaddr,
							 PAGE_SIZE,
							 0);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		s = splhigh();
		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
		total_bpages++;
		free_bpages++;
		splx(s);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	int pages;

	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
	free_bpages -= pages;
	reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}
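
/*
 * Worked example of the accounting above (illustrative numbers): with
 * free_bpages = 2 and a map needing 5 pages, none yet reserved, the
 * call claims both free pages and returns the shortfall of 3.  The
 * nonzero return makes bus_dmamap_load() park the map on the waiting
 * list until free_bounce_page() can satisfy it.
 */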

static vm_offset_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	int s;
	struct bounce_page *bpage;

	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have reserved pages");
	map->pagesreserved--;

	s = splhigh();
	bpage = STAILQ_FIRST(&bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
	reserved_bpages--;
	active_bpages++;
	splx(s);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	int s;
	struct bus_dmamap *map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	s = splhigh();
	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			swi_sched(vm_ih, 0);
		}
	}
	splx(s);
}

void
busdma_swi(void)
{
	int s;
	struct bus_dmamap *map;

	s = splhigh();
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		splx(s);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		s = splhigh();
	}
	splx(s);
}