busdma_machdep.c revision 138194
/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/i386/busdma_machdep.c 138194 2004-11-29 14:49:27Z scottl $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX_BPAGES 512

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  alignment;
	bus_size_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	u_int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;
	int		  map_count;
	bus_dma_lock_t	 *lockfunc;
	void		 *lockfuncarg;
	bus_dma_segment_t *segments;
	struct bounce_zone *bounce_zone;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
	   "Total bounce pages");

struct bus_dmamap {
	struct bp_list	       bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	void		      *buf;		/* unmapped buffer pointer */
	bus_size_t	       buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/*
 * Return true if a match is made.
 *
 * To find a match, walk the chain of bus_dma_tag_t's checking paddr.
 *
 * If paddr is within the bounds of the dma tag, call the filter callback
 * to check for a match; if there is no filter callback, assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		 || ((paddr & (dmat->alignment - 1)) != 0))
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}
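
/*
 * Illustrative only: a driver-supplied bus_dma_filter_t that refines the
 * lowaddr/highaddr exclusion window.  run_filter() treats a nonzero return
 * as "this page is unusable and must be bounced".  The device constraint
 * below (a 64KB hole at 16MB) is hypothetical.
 *
 *	static int
 *	mydev_dma_filter(void *arg, bus_addr_t paddr)
 *	{
 *		return (paddr >= 0x1000000 && paddr < 0x1010000);
 *	}
 *
 * The filter is installed through the filter/filterarg arguments to
 * bus_dma_tag_create() below.
 */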

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should pass &Giant as dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
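
/*
 * Usage sketch (illustrative only): a driver that wants busdma to take its
 * own mutex around deferred callbacks passes busdma_lock_mutex and that
 * mutex when creating its tag.  The softc fields here are hypothetical.
 *
 *	error = bus_dma_tag_create(NULL, 1, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
 *	    NULL, NULL, MAXBSIZE, 1, MAXBSIZE, 0,
 *	    busdma_lock_mutex, &sc->sc_mtx, &sc->sc_dmat);
 *
 * The two arguments before the tag pointer select the lock: busdma_lock_mutex
 * as lockfunc, with the driver's own mutex as lockfuncarg.
 */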

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	panic("driver error: busdma dflt_lock called");
}

#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Basic sanity checking */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	if (newtag == NULL) {
		CTR3(KTR_BUSDMA, "bus_dma_tag_create returned tag %p tag "
		    "flags 0x%x error %d", newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	newtag->segments = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem)
	 || newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0)
			return (error);
		bz = newtag->bounce_zone;

		if (lowaddr > bounce_lowaddr) {
			/*
			 * Go through the pool and kill any pages
			 * that don't reside below lowaddr.
			 */
			panic("bus_dma_tag_create: page reallocation "
			      "not implemented");
		}
		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	/* Log before a failed tag is freed; newtag is known non-NULL here. */
	CTR3(KTR_BUSDMA, "bus_dma_tag_create returned tag %p tag flags 0x%x "
	    "error %d", newtag, newtag->flags, error);
	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}
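
/*
 * Illustrative only: a tag for a hypothetical ISA-style device that can
 * only address the low 16MB.  lowaddr falls below the top of physical
 * memory, so the tag gets BUS_DMA_COULD_BOUNCE, and BUS_DMA_ALLOCNOW
 * pre-populates its bounce zone at creation time:
 *
 *	error = bus_dma_tag_create(NULL, 1, 0,
 *	    BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR,
 *	    NULL, NULL, DFLTPHYS, 1, DFLTPHYS, BUS_DMA_ALLOCNOW,
 *	    busdma_lock_mutex, &Giant, &isa_dmat);
 */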

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t dmat_copy;
	int error;

	error = 0;
	dmat_copy = dmat;

	if (dmat != NULL) {

		if (dmat->map_count != 0) {
			error = EBUSY;
			goto out;
		}

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL)
					free(dmat->segments, M_DEVBUF);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
out:
	CTR2(KTR_BUSDMA, "bus_dma_tag_destroy tag %p error %d", dmat_copy,
	    error);
	return (error);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR2(KTR_BUSDMA, "bus_dmamap_create: tag %p error %d",
			    dmat, ENOMEM);
			return (ENOMEM);
		}
	}

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

		/* Must bounce */
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0)
				return (error);
		}

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT | M_ZERO);
		if (*mapp == NULL) {
			CTR2(KTR_BUSDMA, "bus_dmamap_create: tag %p error %d",
			    dmat, ENOMEM);
			return (ENOMEM);
		}

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0 && total_bpages < maxpages)) {
			int pages;

			if (dmat->lowaddr > bounce_lowaddr) {
				/*
				 * Go through the pool and kill any pages
				 * that don't reside below lowaddr.
				 */
				panic("bus_dmamap_create: page reallocation "
				      "not implemented");
			}
			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - total_bpages, pages);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	CTR3(KTR_BUSDMA, "bus_dmamap_create: tag %p tag flags 0x%x error %d",
	    dmat, dmat->flags, error);
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL && map != &nobounce_dmamap) {
		if (STAILQ_FIRST(&map->bpages) != NULL) {
			CTR2(KTR_BUSDMA, "bus_dmamap_destroy: tag %p error %d",
			    dmat, EBUSY);
			return (EBUSY);
		}
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	CTR1(KTR_BUSDMA, "bus_dmamap_destroy: tag %p error 0", dmat);
	return (0);
}


/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR3(KTR_BUSDMA, "bus_dmamem_alloc: tag %p tag "
			    "flags 0x%x error %d", dmat, dmat->flags, ENOMEM);
			return (ENOMEM);
		}
	}

	if ((dmat->maxsize <= PAGE_SIZE) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 * XXX Certain AGP hardware does.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL) {
		CTR3(KTR_BUSDMA, "bus_dmamem_alloc: tag %p tag flags 0x%x "
		    "error %d", dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	}
	CTR3(KTR_BUSDMA, "bus_dmamem_alloc: tag %p tag flags 0x%x error %d",
	    dmat, dmat->flags, 0);
	return (0);
}
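
/*
 * Usage sketch (illustrative only): allocating a zeroed, DMA-safe buffer
 * and learning its bus address through an immediate load.  The callback
 * and softc fields are hypothetical; dmamem maps are NULL, which
 * bus_dmamap_load() accepts.
 *
 *	static void
 *	mydev_getaddr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		if (error == 0)
 *			*(bus_addr_t *)arg = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamem_alloc(sc->ring_dmat, &sc->ring,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->ring_map);
 *	if (error == 0)
 *		error = bus_dmamap_load(sc->ring_dmat, sc->ring_map,
 *		    sc->ring, RING_SIZE, mydev_getaddr, &sc->ring_busaddr,
 *		    BUS_DMA_NOWAIT);
 */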

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if ((dmat->maxsize <= PAGE_SIZE)
	 && dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
	CTR2(KTR_BUSDMA, "bus_dmamem_free: tag %p flags 0x%x", dmat,
	    dmat->flags);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static __inline int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			pmap_t pmap,
			int flags,
			bus_addr_t *lastaddrp,
			int *segp,
			int first)
{
	bus_dma_segment_t *segs;
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr;
	bus_addr_t paddr;
	int needbounce = 0;
	int seg;

	segs = dmat->segments;

	if (map == NULL)
		map = &nobounce_dmamap;

	if ((map != &nobounce_dmamap && map->pagesneeded == 0)
	 && ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0)) {
		vm_offset_t	vendaddr;

		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
		    map, &nobounce_dmamap, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0) {
				needbounce = 1;
				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	vaddr = (vm_offset_t)buf;
	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		if (map->pagesneeded != 0 && run_filter(dmat, curaddr))
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (needbounce == 0 && curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	bus_addr_t		lastaddr = 0;
	int			error, nsegs = 0;

	if (map != NULL) {
		flags |= BUS_DMA_WAITOK;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
	     &lastaddr, &nsegs, 1);

	if (error == EINPROGRESS) {
		CTR3(KTR_BUSDMA, "bus_dmamap_load: tag %p tag flags 0x%x "
		    "error %d", dmat, dmat->flags, error);
		return (error);
	}

	if (error)
		(*callback)(callback_arg, dmat->segments, 0, error);
	else
		(*callback)(callback_arg, dmat->segments, nsegs + 1, 0);

	CTR2(KTR_BUSDMA, "bus_dmamap_load: tag %p tag flags 0x%x error 0",
	    dmat, dmat->flags);
	return (0);
}
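
/*
 * Note on deferral (illustrative): with a bounce-capable map and without
 * BUS_DMA_NOWAIT, bus_dmamap_load() can return EINPROGRESS.  The load then
 * completes later from busdma_swi(), which re-invokes the callback under
 * the tag's lockfunc.  A driver sketch, with hypothetical names:
 *
 *	error = bus_dmamap_load(sc->dmat, sc->map, sc->buf, sc->buflen,
 *	    mydev_load_cb, sc, 0);
 *	if (error == EINPROGRESS)
 *		error = 0;
 *
 * In the EINPROGRESS case mydev_load_cb runs later with the tag's lock
 * held (e.g. sc->sc_mtx when the tag was created with busdma_lock_mutex).
 */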


/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
	int nsegs, error;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
						m->m_data, m->m_len,
						NULL, flags, &lastaddr,
						&nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
			    nsegs+1, m0->m_pkthdr.len, error);
	}
	CTR3(KTR_BUSDMA, "bus_dmamap_load_mbuf: tag %p tag flags 0x%x "
	    "error %d", dmat, dmat->flags, error);
	return (error);
}
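
/*
 * Illustrative only: a network driver transmit path typically loads an
 * mbuf chain this way (names hypothetical).  The callback2 variant also
 * receives the total mapped length:
 *
 *	static void
 *	mydev_txmap_cb(void *arg, bus_dma_segment_t *segs, int nseg,
 *	    bus_size_t mapsize, int error)
 *	{
 *		(fill transmit descriptors from segs[0..nseg-1])
 *	}
 *
 *	error = bus_dmamap_load_mbuf(sc->tx_dmat, txs->map, m0,
 *	    mydev_txmap_cb, txs, 0);
 */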

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	bus_addr_t lastaddr;
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	pmap_t pmap;

	flags |= BUS_DMA_NOWAIT;
	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = NULL;

	nsegs = 0;
	error = 0;
	first = 1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map,
					addr, minlen,
					pmap, flags, &lastaddr, &nsegs, first);
			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
			    nsegs+1, uio->uio_resid, error);
	}
	CTR3(KTR_BUSDMA, "bus_dmamap_load_uio: tag %p tag flags 0x%x "
	    "error %d", dmat, dmat->flags, error);
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		dmat->bounce_zone->total_bounced++;
		CTR3(KTR_BUSDMA, "_bus_dmamap_sync: tag %p tag flags 0x%x "
		    "op 0x%x performing bounce", dmat, dmat->flags, op);

		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}
	}
}
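
/*
 * Usage sketch (illustrative only): the sync calls that make bouncing work.
 * PREWRITE copies client data into the bounce pages before the device
 * reads them; POSTREAD copies device-written data back out afterwards.
 *
 *	bus_dmamap_sync(dmat, map, BUS_DMASYNC_PREWRITE);
 *	(start host-to-device DMA, wait for completion)
 *	bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTWRITE);
 *
 *	bus_dmamap_sync(dmat, map, BUS_DMASYNC_PREREAD);
 *	(start device-to-host DMA, wait for completion)
 *	bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD);
 */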

static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		 && (dmat->boundary <= bz->boundary)
		 && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = dmat->alignment;
	bz->boundary = dmat->boundary;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "boundary", CTLFLAG_RD, &bz->boundary, 0, "");

	return (0);
}
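
/*
 * Illustrative only: the per-zone statistics land under hw.busdma, so a
 * system with one constrained tag might report something like (all values
 * hypothetical):
 *
 *	hw.busdma.total_bpages: 32
 *	hw.busdma.zone0.total_bpages: 32
 *	hw.busdma.zone0.free_bpages: 32
 *	hw.busdma.zone0.lowaddr: 0xffffff
 *	hw.busdma.zone0.alignment: 1
 *	hw.busdma.zone0.boundary: 0
 */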
992137445Sscottl
99332516Sgibbsstatic int
99432516Sgibbsalloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
99532516Sgibbs{
996137445Sscottl	struct bounce_zone *bz;
99732516Sgibbs	int count;
99832516Sgibbs
999137445Sscottl	bz = dmat->bounce_zone;
100032516Sgibbs	count = 0;
100132516Sgibbs	while (numpages > 0) {
100232516Sgibbs		struct bounce_page *bpage;
100332516Sgibbs
100432516Sgibbs		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
100569781Sdwmalone						     M_NOWAIT | M_ZERO);
100632516Sgibbs
100732516Sgibbs		if (bpage == NULL)
100832516Sgibbs			break;
100932516Sgibbs		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
101032516Sgibbs							 M_NOWAIT, 0ul,
1011137445Sscottl							 bz->lowaddr,
1012132545Sscottl							 PAGE_SIZE,
1013137445Sscottl							 bz->boundary);
1014102241Sarchie		if (bpage->vaddr == 0) {
101532516Sgibbs			free(bpage, M_DEVBUF);
101632516Sgibbs			break;
101732516Sgibbs		}
101832516Sgibbs		bpage->busaddr = pmap_kextract(bpage->vaddr);
1019112346Smux		mtx_lock(&bounce_lock);
1020137445Sscottl		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
102132516Sgibbs		total_bpages++;
1022137965Sscottl		bz->total_bpages++;
1023137445Sscottl		bz->free_bpages++;
1024112346Smux		mtx_unlock(&bounce_lock);
102532516Sgibbs		count++;
102632516Sgibbs		numpages--;
102732516Sgibbs	}
102832516Sgibbs	return (count);
102932516Sgibbs}
103032516Sgibbs
103132516Sgibbsstatic int
1032113228Sjakereserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
103332516Sgibbs{
1034137445Sscottl	struct bounce_zone *bz;
103532516Sgibbs	int pages;
103632516Sgibbs
1037112346Smux	mtx_assert(&bounce_lock, MA_OWNED);
1038137445Sscottl	bz = dmat->bounce_zone;
1039137445Sscottl	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
1040113228Sjake	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
1041113228Sjake		return (map->pagesneeded - (map->pagesreserved + pages));
1042137445Sscottl	bz->free_bpages -= pages;
1043137445Sscottl	bz->reserved_bpages += pages;
104432516Sgibbs	map->pagesreserved += pages;
104532516Sgibbs	pages = map->pagesneeded - map->pagesreserved;
104632516Sgibbs
104732516Sgibbs	return (pages);
104832516Sgibbs}
104932516Sgibbs
1050112569Sjakestatic bus_addr_t
105132516Sgibbsadd_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
105232516Sgibbs		bus_size_t size)
105332516Sgibbs{
1054137445Sscottl	struct bounce_zone *bz;
105532516Sgibbs	struct bounce_page *bpage;
105632516Sgibbs
1057137445Sscottl	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
1058113228Sjake	KASSERT(map != NULL && map != &nobounce_dmamap,
1059113228Sjake	    ("add_bounce_page: bad map %p", map));
1060113228Sjake
1061137445Sscottl	bz = dmat->bounce_zone;
106232516Sgibbs	if (map->pagesneeded == 0)
106332516Sgibbs		panic("add_bounce_page: map doesn't need any pages");
106432516Sgibbs	map->pagesneeded--;
106532516Sgibbs
106632516Sgibbs	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have reserved pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}
1132