busdma_machdep.c revision 116907
/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/i386/busdma_machdep.c 116907 2003-06-27 08:31:48Z scottl $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX_BPAGES 512

struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  alignment;
	bus_size_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	u_int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;
	int		  map_count;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

struct bus_dmamap {
	struct bp_list	       bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	void		      *buf;		/* unmapped buffer pointer */
	bus_size_t	       buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	struct mtx	      *callback_mtx;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/* To protect all the bounce page related lists and data. */
static struct mtx bounce_lock;

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match; if there is no filter callback then assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		if (paddr > dmat->lowaddr
		 && paddr <= dmat->highaddr
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}
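
/*
 * Illustrative sketch of a driver-supplied bus_dma_filter_t: it only has
 * to decide, for a single physical address, whether that address must be
 * bounced.  A non-zero return requests bouncing.  The function name and
 * the 16MB cutoff below are hypothetical.
 *
 *	static int
 *	foo_dma_filter(void *arg, bus_addr_t paddr)
 *	{
 *		return (paddr >= 16 * 1024 * 1024);
 *	}
 */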

#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		/*
		 * XXX Not really correct??? Probably need to honor boundary
		 *     all the way up the inheritance chain.
		 */
		newtag->boundary = MAX(parent->boundary, newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		/* Must bounce */

		if (lowaddr > bounce_lowaddr) {
			/*
			 * Go through the pool and kill any pages
			 * that don't reside below lowaddr.
			 */
			panic("bus_dma_tag_create: page reallocation "
			      "not implemented");
		}
		if (ptoa(total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}
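
/*
 * Illustrative sketch of a typical caller: a driver describes its device's
 * addressing limits once in a tag and then derives maps from it.  The softc
 * field names and the MAXBSIZE/1-segment choice here are hypothetical.
 *
 *	error = bus_dma_tag_create(NULL, 1, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
 *	    NULL, NULL, MAXBSIZE, 1,
 *	    BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, &sc->foo_dmat);
 */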

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference, so release
				 * our reference on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)) {
		/* Must bounce */
		int maxpages;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT | M_ZERO);
		if (*mapp == NULL)
			return (ENOMEM);

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0
		  && total_bpages < maxpages)) {
			int pages;

			if (dmat->lowaddr > bounce_lowaddr) {
				/*
				 * Go through the pool and kill any pages
				 * that don't reside below lowaddr.
				 */
				panic("bus_dmamap_create: page reallocation "
				      "not implemented");
			}
			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - total_bpages, pages);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}


/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if ((dmat->maxsize <= PAGE_SIZE) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF,
				(flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		mtx_lock(&Giant);
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF,
		    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK,
		    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
		    dmat->boundary);
		mtx_unlock(&Giant);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if ((dmat->maxsize <= PAGE_SIZE)
	 && dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
		free(vaddr, M_DEVBUF);
	else {
		mtx_lock(&Giant);
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
		mtx_unlock(&Giant);
	}
}
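
/*
 * Illustrative sketch of the usual pairing: static DMA memory such as a
 * descriptor ring is obtained with bus_dmamem_alloc() and returned with
 * bus_dmamem_free().  The softc field names are hypothetical.
 *
 *	error = bus_dmamem_alloc(sc->foo_dmat, (void **)&sc->foo_ring,
 *	    BUS_DMA_NOWAIT, &sc->foo_map);
 *	...
 *	bus_dmamem_free(sc->foo_dmat, sc->foo_ring, sc->foo_map);
 */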

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			bus_dma_segment_t segs[],
			void *buf, bus_size_t buflen,
			struct thread *td,
			int flags,
			bus_addr_t *lastaddrp,
			int *segp,
			int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr;
	bus_addr_t paddr;
	int needbounce = 0;
	int seg;
	pmap_t pmap;

	if (map == NULL)
		map = &nobounce_dmamap;

	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = NULL;

	if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)) {
		vm_offset_t	vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0) {
				needbounce = 1;
				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
	}

	vaddr = (vm_offset_t)buf;

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
								map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		if (map->pagesneeded != 0 && run_filter(dmat, curaddr))
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (needbounce == 0 && curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

#define BUS_DMAMAP_NSEGS ((64 * 1024) / PAGE_SIZE + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
#ifdef __GNUC__
	bus_dma_segment_t	dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t	dm_segments[BUS_DMAMAP_NSEGS];
#endif
	bus_addr_t		lastaddr = 0;
	int			error, nsegs = 0;

	if (map != NULL) {
		flags |= BUS_DMA_WAITOK;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	error = _bus_dmamap_load_buffer(dmat, map, dm_segments, buf, buflen,
	    NULL, flags, &lastaddr, &nsegs, 1);

	if (error == EINPROGRESS)
		return (error);

	if (error)
		(*callback)(callback_arg, dm_segments, 0, error);
	else
		(*callback)(callback_arg, dm_segments, nsegs + 1, 0);

	return (0);
}
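
/*
 * Illustrative sketch of a load: because the load may be deferred, the
 * physical segments are always delivered through the callback.  The
 * callback name, the softc fields, and "ring_size" are hypothetical.
 *
 *	static void
 *	foo_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg,
 *	    int error)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		if (error == 0)
 *			sc->foo_busaddr = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_load(sc->foo_dmat, sc->foo_map, sc->foo_ring,
 *	    ring_size, foo_dma_callback, sc, 0);
 */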


/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, error;

	KASSERT(m0->m_flags & M_PKTHDR,
		("bus_dmamap_load_mbuf: no packet header"));

	flags |= BUS_DMA_NOWAIT;
	nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
						dm_segments,
						m->m_data, m->m_len,
						NULL, flags, &lastaddr,
						&nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments,
			    nsegs+1, m0->m_pkthdr.len, error);
	}
	return (error);
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	bus_addr_t lastaddr;
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	struct thread *td = NULL;

	flags |= BUS_DMA_NOWAIT;
	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		td = uio->uio_td;
		KASSERT(td != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
	}

	nsegs = 0;
	error = 0;
	first = 1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map,
					dm_segments,
					addr, minlen,
					td, flags, &lastaddr, &nsegs, first);
			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments,
			    nsegs+1, uio->uio_resid, error);
	}
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}
	}
}
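
/*
 * Illustrative sketch of the sync discipline: with bounce pages in use,
 * these calls are what actually copy client data to and from the bounce
 * buffers, so a driver issues PREWRITE before the device reads a buffer
 * and POSTREAD after the device has written one.  Names are hypothetical.
 *
 *	bus_dmamap_sync(sc->foo_dmat, sc->foo_map, BUS_DMASYNC_PREWRITE);
 *	...
 *	bus_dmamap_sync(sc->foo_dmat, sc->foo_map, BUS_DMASYNC_POSTREAD);
 */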

static void
init_bounce_pages(void *dummy __unused)
{

	free_bpages = 0;
	reserved_bpages = 0;
	active_bpages = 0;
	total_bpages = 0;
	STAILQ_INIT(&bounce_page_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	int count;

	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		mtx_lock(&Giant);
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 dmat->lowaddr,
							 PAGE_SIZE,
							 0);
		mtx_unlock(&Giant);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
		total_bpages++;
		free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	free_bpages -= pages;
	reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_page *bpage;

	KASSERT(map != NULL && map != &nobounce_dmamap,
	    ("add_bounce_page: bad map %p", map));

	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
	reserved_bpages--;
	active_bpages++;
	mtx_unlock(&bounce_lock);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		if (map->callback_mtx != NULL)
			mtx_lock(map->callback_mtx);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		if (map->callback_mtx != NULL)
			mtx_unlock(map->callback_mtx);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}