/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/i386/busdma_machdep.c 131529 2004-07-03 18:18:36Z scottl $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX_BPAGES 512

struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  alignment;
	bus_size_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	u_int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;
	int		  map_count;
	bus_dma_lock_t	 *lockfunc;
	void		 *lockfuncarg;
	bus_dma_segment_t *segments;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

static struct mtx bounce_lock;
static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static int total_bounced;
static int total_deferred;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, free_bpages, CTLFLAG_RD, &free_bpages, 0,
	   "Free bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, reserved_bpages, CTLFLAG_RD, &reserved_bpages,
	   0, "Reserved bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, active_bpages, CTLFLAG_RD, &active_bpages, 0,
	   "Active bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
	   "Total bounce pages");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bounced, CTLFLAG_RD, &total_bounced, 0,
	   "Total bounce requests");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_deferred, CTLFLAG_RD, &total_deferred, 0,
	   "Total bounce requests that were deferred");

struct bus_dmamap {
	struct bp_list	       bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	void		      *buf;		/* unmapped buffer pointer */
	bus_size_t	       buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
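/* Statically allocated map used for transfers that never need to bounce. */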
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr,
			       bus_size_t len);

/*
 * Return true if a match is made.
 *
 * To find a match, walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag, call the filter callback
 * to check for a match; if there is no filter callback, assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr, bus_size_t len)
{
	bus_size_t bndy;
	int retval;

	retval = 0;
	bndy = dmat->boundary;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		 || ((paddr & (dmat->alignment - 1)) != 0)
		 || ((paddr & bndy) != ((paddr + len) & bndy)))
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant as dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	panic("driver error: busdma dflt_lock called");
}

#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Basic sanity checking */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	newtag->segments = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		/*
		 * XXX Not really correct??? Probably need to honor boundary
		 *     all the way up the inheritance chain.
		 */
		newtag->boundary = MAX(parent->boundary, newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		/* Must bounce */

		if (lowaddr > bounce_lowaddr) {
			/*
			 * Go through the pool and kill any pages
			 * that don't reside below lowaddr.
			 */
			panic("bus_dma_tag_create: page reallocation "
			      "not implemented");
		}
		if (ptoa(total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL)
					free(dmat->segments, M_DEVBUF);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL)
			return (ENOMEM);
	}

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)
	 || dmat->alignment > 1 || dmat->boundary > 0) {
		/* Must bounce */
		int maxpages;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT | M_ZERO);
		if (*mapp == NULL)
			return (ENOMEM);

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0 && total_bpages < maxpages)) {
			int pages;

			if (dmat->lowaddr > bounce_lowaddr) {
				/*
				 * Go through the pool and kill any pages
				 * that don't reside below lowaddr.
				 */
				panic("bus_dmamap_create: page reallocation "
				      "not implemented");
			}
			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - total_bpages, pages);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL && map != &nobounce_dmamap) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}


/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL)
			return (ENOMEM);
	}

	if ((dmat->maxsize <= PAGE_SIZE) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 * XXX Certain AGP hardware does.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if ((dmat->maxsize <= PAGE_SIZE)
	 && dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			struct thread *td,
			int flags,
			bus_addr_t *lastaddrp,
			int *segp,
			int first)
{
	bus_dma_segment_t *segs;
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr;
	bus_addr_t paddr;
	int needbounce = 0;
	int seg;
	pmap_t pmap;

	segs = dmat->segments;

	if (map == NULL)
		map = &nobounce_dmamap;

	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = NULL;

	if ((dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)
	 || dmat->boundary > 0 || dmat->alignment > 1)
	 && map->pagesneeded == 0) {
		vm_offset_t	vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr, 0) != 0) {
				needbounce = 1;
				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
	}

	vaddr = (vm_offset_t)buf;

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		if (map->pagesneeded != 0 && run_filter(dmat, curaddr, sgsize))
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (needbounce == 0 && curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	bus_addr_t		lastaddr = 0;
	int			error, nsegs = 0;

	if (map != NULL) {
		flags |= BUS_DMA_WAITOK;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
	     &lastaddr, &nsegs, 1);

	if (error == EINPROGRESS)
		return (error);

	if (error)
		(*callback)(callback_arg, dmat->segments, 0, error);
	else
		(*callback)(callback_arg, dmat->segments, nsegs + 1, 0);

	return (0);
}


/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
	int nsegs, error;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
						m->m_data, m->m_len,
						NULL, flags, &lastaddr,
						&nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
			    nsegs+1, m0->m_pkthdr.len, error);
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	bus_addr_t lastaddr;
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	struct thread *td = NULL;

	flags |= BUS_DMA_NOWAIT;
	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		td = uio->uio_td;
		KASSERT(td != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
	}

	nsegs = 0;
	error = 0;
	first = 1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map,
					addr, minlen,
					td, flags, &lastaddr, &nsegs, first);
			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
			    nsegs+1, uio->uio_resid, error);
	}
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

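/*
 * Perform any bounce copying required for the requested sync operation:
 * PREWRITE copies the client data into the bounce pages before the device
 * reads them, and POSTREAD copies it back out after the device has written.
 */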
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		total_bounced++;

		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}
	}
}

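/*
 * One-time initialization of the bounce page lists, counters, and lock,
 * run via SYSINIT before any bounce pages can be allocated.
 */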
static void
init_bounce_pages(void *dummy __unused)
{

	free_bpages = 0;
	reserved_bpages = 0;
	active_bpages = 0;
	total_bpages = 0;
	STAILQ_INIT(&bounce_page_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

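/*
 * Add up to 'numpages' bounce pages to the global pool.  Each page is
 * contigmalloc'd below dmat->lowaddr with page alignment.  Returns the
 * number of pages actually allocated.
 */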
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	int count;

	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 dmat->lowaddr,
							 PAGE_SIZE,
							 dmat->boundary);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
		total_bpages++;
		free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

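/*
 * Reserve free bounce pages for 'map'.  With commit == 0 the reservation
 * is all-or-nothing: nothing is taken unless the map's full requirement can
 * be met.  With commit != 0 whatever is available is reserved.  Returns the
 * number of pages still needed, so zero means the map is fully reserved.
 * Called with the bounce lock held.
 */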
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	free_bpages -= pages;
	reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

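/*
 * Consume one previously reserved bounce page for the client buffer at
 * 'vaddr' of length 'size', queue it on the map, and return the bounce
 * page's bus address for use in the segment list.
 */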
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_page *bpage;

	KASSERT(map != NULL && map != &nobounce_dmamap,
	    ("add_bounce_page: bad map %p", map));

	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
	reserved_bpages--;
	active_bpages++;
	mtx_unlock(&bounce_lock);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

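/*
 * Return a bounce page to the free pool.  If a deferred map on the waiting
 * list can now be fully reserved, move it to the callback list and schedule
 * busdma_swi() to retry its load.
 */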
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

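/*
 * Software interrupt handler for deferred dmamap loads.  Retries
 * bus_dmamap_load() for each map whose bounce pages have become available,
 * calling the tag's lock function around the load so the client callback
 * runs with its lock held.
 */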
void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}