busdma_machdep.c revision 132544
/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/i386/busdma_machdep.c 132544 2004-07-22 15:46:51Z scottl $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX_BPAGES 512

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  alignment;
	bus_size_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	u_int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;
	int		  map_count;
	bus_dma_lock_t	 *lockfunc;
	void		 *lockfuncarg;
	bus_dma_segment_t *segments;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
};

int busdma_swi_pending;

/*
 * In order to reduce extra lock/unlock sequences in the main bounce path,
 * this lock covers the global bounce lists and the dmat bounce lists.
 */
static struct mtx bounce_lock;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static int total_bounced;
static int total_deferred;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, free_bpages, CTLFLAG_RD, &free_bpages, 0,
	   "Free bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, reserved_bpages, CTLFLAG_RD, &reserved_bpages,
	   0, "Reserved bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, active_bpages, CTLFLAG_RD, &active_bpages, 0,
	   "Active bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
	   "Total bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bounced, CTLFLAG_RD, &total_bounced, 0,
	   "Total bounce requests");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_deferred, CTLFLAG_RD, &total_deferred, 0,
	   "Total bounce requests that were deferred");

struct bus_dmamap {
	struct bp_list	       bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	void		      *buf;		/* unmapped buffer pointer */
	bus_size_t	       buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr,
			       bus_size_t len, struct bus_dmamap *map);

/*
 * Return true if a match is made.
 *
 * To find a match, walk the chain of bus_dma_tag_t's checking 'paddr'
 * against each tag's constraints.
 *
 * If paddr is within the bounds of the dma tag, call the filter callback
 * to check for a match; if there is no filter callback, assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr, bus_size_t len,
	   struct bus_dmamap *map)
{
	bus_addr_t endaddr;
	bus_size_t bndy;
	int retval;

	retval = 0;
	bndy = dmat->boundary;
	endaddr = paddr + len - 1;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		 || ((paddr & (dmat->alignment - 1)) != 0)
		 || ((paddr & bndy) != (endaddr & bndy)))
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0)) {
			if ((paddr & (dmat->alignment - 1)) != 0)
				printf("bouncing due to alignment, "
				    "paddr= 0x%lx, map= %p\n",
				    (u_long)paddr, map);
			if ((paddr & bndy) != (endaddr & bndy)) {
				printf("bouncing due to boundary, "
				    "paddr= 0x%lx, endaddr= 0x%lx, map= %p\n",
				    (u_long)paddr, (u_long)endaddr, map);
				printf("paddr & bndy = 0x%lx, "
				    "paddr + len & bndy = 0x%lx\n",
				    (u_long)(paddr & bndy),
				    (u_long)(endaddr & bndy));
			}
			retval = 1;
		}

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

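/*
 * Illustrative sketch (not part of this file): a driver that relies on
 * Giant for its locking could hand busdma_lock_mutex and &Giant to
 * bus_dma_tag_create() below, e.g.:
 *
 *	error = bus_dma_tag_create(NULL, 1, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
 *	    NULL, NULL, MAXBSIZE, 1, MAXBSIZE, 0,
 *	    busdma_lock_mutex, &Giant, &sc->sc_dmat);
 *
 * The "sc->sc_dmat" softc member and the particular size limits are
 * hypothetical; only the argument order matches bus_dma_tag_create()
 * as defined in this file.
 */
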
/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	panic("driver error: busdma dflt_lock called");
}

#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Basic sanity checking */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	STAILQ_INIT(&newtag->bounce_page_list);
	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	newtag->segments = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		/*
		 * XXX Not really correct??? Probably need to honor boundary
		 *     all the way up the inheritance chain.
		 */
		newtag->boundary = MAX(parent->boundary, newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		/* Must bounce */

		if (lowaddr > bounce_lowaddr) {
			/*
			 * Go through the pool and kill any pages
			 * that don't reside below lowaddr.
			 */
			panic("bus_dma_tag_create: page reallocation "
			      "not implemented");
		}
		if (ptoa(total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL)
					free(dmat->segments, M_DEVBUF);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL)
			return (ENOMEM);
	}

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)
	 || dmat->alignment > 1 || dmat->boundary > 0) {
		/* Must bounce */
		int maxpages;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT | M_ZERO);
		if (*mapp == NULL)
			return (ENOMEM);

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0 && total_bpages < maxpages)) {
			int pages;

			if (dmat->lowaddr > bounce_lowaddr) {
				/*
				 * Go through the pool and kill any pages
				 * that don't reside below lowaddr.
				 */
				panic("bus_dmamap_create: page reallocation "
				      "not implemented");
			}
			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - total_bpages, pages);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL && map != &nobounce_dmamap) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}


/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL)
			return (ENOMEM);
	}

	if ((dmat->maxsize <= PAGE_SIZE) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 * XXX Certain AGP hardware does.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if ((dmat->maxsize <= PAGE_SIZE)
	 && dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			struct thread *td,
			int flags,
			bus_addr_t *lastaddrp,
			int *segp,
			int first)
{
	bus_dma_segment_t *segs;
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr;
	bus_addr_t paddr;
	int needbounce = 0;
	int seg;
	pmap_t pmap;

	segs = dmat->segments;

	if (map == NULL)
		map = &nobounce_dmamap;

	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = NULL;

	if ((dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)
	 || dmat->boundary > 0 || dmat->alignment > 1)
	 && map != &nobounce_dmamap && map->pagesneeded == 0) {
		vm_offset_t	vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr, PAGE_SIZE, map) != 0) {
				needbounce = 1;
				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
	}

	vaddr = (vm_offset_t)buf;

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/*
				 * Queue us for resources.  A future
				 * optimization might be to search other bounce
				 * lists for extra pages.
				 */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		if (map->pagesneeded != 0 &&
		    run_filter(dmat, curaddr, sgsize, map))
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (needbounce == 0 && curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	bus_addr_t		lastaddr = 0;
	int			error, nsegs = 0;

	if (map != NULL) {
		flags |= BUS_DMA_WAITOK;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
	     &lastaddr, &nsegs, 1);

	if (error == EINPROGRESS)
		return (error);

	if (error)
		(*callback)(callback_arg, dmat->segments, 0, error);
	else
		(*callback)(callback_arg, dmat->segments, nsegs + 1, 0);

	return (0);
}

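/*
 * Illustrative sketch (hypothetical, not code from this file): a typical
 * single-segment consumer of bus_dmamap_load() records the address that
 * is handed back through the callback, e.g.:
 *
 *	static void
 *	example_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg,
 *	    int error)
 *	{
 *		bus_addr_t *busaddrp = arg;
 *
 *		if (error != 0)
 *			return;
 *		*busaddrp = segs[0].ds_addr;
 *	}
 *
 * The callback may also run later from busdma_swi() if the load was
 * deferred with EINPROGRESS; only the bus_dmamap_callback_t signature is
 * real, the function name and argument convention are examples.
 */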

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
	int nsegs, error;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
						m->m_data, m->m_len,
						NULL, flags, &lastaddr,
						&nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
			    nsegs+1, m0->m_pkthdr.len, error);
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	bus_addr_t lastaddr = 0;
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	struct thread *td = NULL;

	flags |= BUS_DMA_NOWAIT;
	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		td = uio->uio_td;
		KASSERT(td != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
	}

	nsegs = 0;
	error = 0;
	first = 1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map,
					addr, minlen,
					td, flags, &lastaddr, &nsegs, first);
			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
			    nsegs+1, uio->uio_resid, error);
	}
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		total_bounced++;

		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}
	}
}

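/*
 * Illustrative sketch (an assumption about driver usage, not code from
 * this file): a driver pushing data to a device typically brackets the
 * transfer with
 *
 *	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);
 *	... start the DMA and wait for it to complete ...
 *	bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTWRITE);
 *
 * so that the PREWRITE pass above copies the client data into the bounce
 * pages before the device reads them.  For device-to-host transfers the
 * PREREAD/POSTREAD pair is used and the POSTREAD pass copies the bounced
 * data back to the client buffer.
 */
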
static void
init_bounce_pages(void *dummy __unused)
{

	free_bpages = 0;
	reserved_bpages = 0;
	active_bpages = 0;
	total_bpages = 0;
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	int count;

	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 dmat->lowaddr,
							 dmat->alignment?
							 dmat->alignment : 1ul,
							 dmat->boundary);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&dmat->bounce_page_list, bpage, links);
		total_bpages++;
		free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	free_bpages -= pages;
	reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_page *bpage;

	KASSERT(map != NULL && map != &nobounce_dmamap,
	    ("add_bounce_page: bad map %p", map));

	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&dmat->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&dmat->bounce_page_list, links);
	reserved_bpages--;
	active_bpages++;
	mtx_unlock(&bounce_lock);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&dmat->bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}