busdma_machdep.c revision 115683
/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/i386/busdma_machdep.c 115683 2003-06-02 06:43:15Z obrien $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX_BPAGES 512

struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  alignment;
	bus_size_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	u_int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;
	int		  map_count;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

struct bus_dmamap {
	struct bp_list	       bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	void		      *buf;		/* unmapped buffer pointer */
	bus_size_t	       buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
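/*
 * Statically allocated map used for loads that never need to bounce;
 * callers that pass a NULL map are switched to this one.
 */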
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/* To protect all the bounce-page-related lists and data. */
static struct mtx bounce_lock;

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match; if there is no filter callback then assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		if (paddr > dmat->lowaddr
		 && paddr <= dmat->highaddr
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
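	/*
	 * Round the exclusion limits to the last address of the page that
	 * contains them, since bouncing decisions are made on whole pages.
	 */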
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		/*
		 * XXX Not really correct??? Probably need to honor boundary
		 *     all the way up the inheritance chain.
		 */
		newtag->boundary = MAX(parent->boundary, newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		/* Must bounce */

		if (lowaddr > bounce_lowaddr) {
			/*
			 * Go through the pool and kill any pages
			 * that don't reside below lowaddr.
			 */
			panic("bus_dma_tag_create: page reallocation "
			      "not implemented");
		}
		if (ptoa(total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference gone, so release our
				 * reference on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)) {
		/* Must bounce */
		int maxpages;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT | M_ZERO);
		if (*mapp == NULL)
			return (ENOMEM);

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0
		  && total_bpages < maxpages)) {
			int pages;

			if (dmat->lowaddr > bounce_lowaddr) {
				/*
				 * Go through the pool and kill any pages
				 * that don't reside below lowaddr.
				 */
				panic("bus_dmamap_create: page reallocation "
				      "not implemented");
			}
			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - total_bpages, pages);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}


/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if ((dmat->maxsize <= PAGE_SIZE) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF,
				(flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		mtx_lock(&Giant);
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF,
		    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK,
		    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
		    dmat->boundary);
		mtx_unlock(&Giant);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if ((dmat->maxsize <= PAGE_SIZE)
	 && dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
		free(vaddr, M_DEVBUF);
	else {
		mtx_lock(&Giant);
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
		mtx_unlock(&Giant);
	}
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entry, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
    			bus_dmamap_t map,
			bus_dma_segment_t segs[],
			void *buf, bus_size_t buflen,
			struct thread *td,
			int flags,
			bus_addr_t *lastaddrp,
			int *segp,
			int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr;
	bus_addr_t paddr;
	int needbounce = 0;
	int seg;
	pmap_t pmap;

	if (map == NULL)
		map = &nobounce_dmamap;

	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = NULL;

	if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)) {
		vm_offset_t	vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0) {
				needbounce = 1;
				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
	}

	vaddr = (vm_offset_t)buf;

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
								map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	lastaddr = *lastaddrp;
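	/* Boundary mask; assumes dmat->boundary is zero or a power of two. */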
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		if (map->pagesneeded != 0 && run_filter(dmat, curaddr))
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (needbounce == 0 && curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

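/*
 * Worst-case segment count when the compiler cannot size the array from
 * dmat->nsegments: a 64KB transfer of discontiguous pages, plus one extra
 * segment for misalignment.
 */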
#define BUS_DMAMAP_NSEGS ((64 * 1024) / PAGE_SIZE + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
#ifdef __GNUC__
	bus_dma_segment_t	dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t	dm_segments[BUS_DMAMAP_NSEGS];
#endif
	bus_addr_t		lastaddr = 0;
	int			error, nsegs = 0;

	if (map != NULL) {
		flags |= BUS_DMA_WAITOK;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	error = _bus_dmamap_load_buffer(dmat, map, dm_segments, buf, buflen,
	    NULL, flags, &lastaddr, &nsegs, 1);

	if (error == EINPROGRESS)
		return (error);

	if (error)
		(*callback)(callback_arg, dm_segments, 0, error);
	else
		(*callback)(callback_arg, dm_segments, nsegs + 1, 0);

	return (0);
}


/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, error;

	KASSERT(m0->m_flags & M_PKTHDR,
		("bus_dmamap_load_mbuf: no packet header"));

	flags |= BUS_DMA_NOWAIT;
	nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
						dm_segments,
						m->m_data, m->m_len,
						NULL, flags, &lastaddr,
						&nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments,
			    nsegs+1, m0->m_pkthdr.len, error);
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	bus_addr_t lastaddr;
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	struct thread *td = NULL;

	flags |= BUS_DMA_NOWAIT;
	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		td = uio->uio_td;
		KASSERT(td != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
	}

	nsegs = 0;
	error = 0;
	first = 1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map,
					dm_segments,
					addr, minlen,
					td, flags, &lastaddr, &nsegs, first);
			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments,
			    nsegs+1, uio->uio_resid, error);
	}
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
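		/* PREWRITE: copy the client's data out to the bounce pages. */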
		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}

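		/* POSTREAD: copy the DMA'd data from the bounce pages back. */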
		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}
	}
}

static void
init_bounce_pages(void *dummy __unused)
{

	free_bpages = 0;
	reserved_bpages = 0;
	active_bpages = 0;
	total_bpages = 0;
	STAILQ_INIT(&bounce_page_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	int count;

	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		mtx_lock(&Giant);
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 dmat->lowaddr,
							 PAGE_SIZE,
							 0);
		mtx_unlock(&Giant);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
		total_bpages++;
		free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

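/*
 * Reserve bounce pages for a map.  Returns the number of pages still
 * needed; with commit == 0 nothing is reserved unless the entire request
 * can be satisfied.
 */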
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	free_bpages -= pages;
	reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_page *bpage;

	KASSERT(map != NULL && map != &nobounce_dmamap,
	    ("add_bounce_page: bad map %p", map));

	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
	reserved_bpages--;
	active_bpages++;
	mtx_unlock(&bounce_lock);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

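/*
 * Software interrupt handler for deferred loads: run the callbacks for
 * maps that were queued while waiting for bounce pages.
 */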
void
busdma_swi(void)
{
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}