busdma_machdep.c revision 191011
/*-
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/i386/busdma_machdep.c 191011 2009-04-13 19:20:32Z kib $");

#include <sys/param.h>
#include <sys/kdb.h>
#include <ddb/ddb.h>
#include <ddb/db_output.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#define MAX_BPAGES 512
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  alignment;
	bus_size_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	u_int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;
	int		  map_count;
	bus_dma_lock_t	 *lockfunc;
	void		 *lockfuncarg;
	bus_dma_segment_t *segments;
	struct bounce_zone *bounce_zone;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	int		map_count;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
	   "Total bounce pages");

struct bus_dmamap {
	struct bp_list	       bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	void		      *buf;		/* unmapped buffer pointer */
	bus_size_t	       buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
int _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags);

#ifdef XEN
#undef pmap_kextract
#define pmap_kextract pmap_kextract_ma
#endif

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match; if there is no filter callback then assume a match.
 */
int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		 || ((paddr & (dmat->alignment - 1)) != 0))
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant as dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
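
/*
 * Editor's illustrative sketch (not part of the original file): a driver
 * that wants its deferred-load callbacks serialized by its own mutex can
 * pass this helper together with that mutex as the lockfunc/lockfuncarg
 * pair of bus_dma_tag_create().  "sc", "foo_mtx", and "foo_dtag" are
 * hypothetical names.
 *
 *	mtx_init(&sc->foo_mtx, "foo", NULL, MTX_DEF);
 *	error = bus_dma_tag_create(..., busdma_lock_mutex, &sc->foo_mtx,
 *	    &sc->foo_dtag);
 */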

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are never meant to be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	panic("driver error: busdma dflt_lock called");
}

/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Basic sanity checking */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	if (maxsegsz == 0) {
		return (EINVAL);
	}

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	newtag->segments = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);
		if ((newtag->filter != NULL) ||
		    ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
			newtag->flags |= BUS_DMA_COULD_BOUNCE;
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem)
	 || newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
	return (error);
}
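
/*
 * Editor's illustrative sketch (not part of the original file): a typical
 * tag for a device limited to 32-bit addresses, with no alignment,
 * boundary, or filter constraints.  The variable names are hypothetical;
 * the parenthesized notes name each parameter.
 *
 *	bus_dma_tag_t dtag;
 *	int error;
 *
 *	error = bus_dma_tag_create(NULL,	(parent)
 *	    1, 0,				(alignment, boundary)
 *	    BUS_SPACE_MAXADDR_32BIT,		(lowaddr)
 *	    BUS_SPACE_MAXADDR,			(highaddr)
 *	    NULL, NULL,				(filter, filterarg)
 *	    DFLTPHYS, 1, DFLTPHYS,		(maxsize, nsegments, maxsegsz)
 *	    0,					(flags)
 *	    busdma_lock_mutex, &Giant,		(lockfunc, lockfuncarg)
 *	    &dtag);
 */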

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t dmat_copy;
	int error;

	error = 0;
	dmat_copy = dmat;

	if (dmat != NULL) {

		if (dmat->map_count != 0) {
			error = EBUSY;
			goto out;
		}

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL)
					free(dmat->segments, M_DEVBUF);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
out:
	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
	return (error);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}
	}

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0)
				return (error);
		}
		bz = dmat->bounce_zone;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT | M_ZERO);
		if (*mapp == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->alignment > 1)
			maxpages = MAX_BPAGES;
		else
			maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
		bz->map_count++;
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL && map != &nobounce_dmamap) {
		if (STAILQ_FIRST(&map->bpages) != NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, EBUSY);
			return (EBUSY);
		}
		if (dmat->bounce_zone)
			dmat->bounce_zone->map_count--;
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}


/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    mflags);
		if (dmat->segments == NULL) {
			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
			    __func__, dmat, dmat->flags, ENOMEM);
			return (ENOMEM);
		}
	}
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	/*
	 * XXX:
	 * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact
	 * alignment guarantees of malloc need to be nailed down, and the
	 * code below should be rewritten to take that into account.
	 *
	 * In the meantime, we'll warn the user if malloc gets it wrong.
	 */
	if ((dmat->maxsize <= PAGE_SIZE) &&
	   (dmat->alignment < dmat->maxsize) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 * XXX Certain AGP hardware does.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	} else if ((uintptr_t)*vaddr & (dmat->alignment - 1)) {
		printf("bus_dmamem_alloc failed to align memory properly.\n");
	}
	if (flags & BUS_DMA_NOCACHE)
		pmap_change_attr((vm_offset_t)*vaddr, dmat->maxsize,
		    PAT_UNCACHEABLE);
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, 0);
	return (0);
}
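
/*
 * Editor's illustrative sketch (not part of the original file): allocating
 * a zeroed, DMA-reachable control structure with a tag created as in the
 * earlier sketch.  "dtag" is the hypothetical tag from that example.
 *
 *	void *vaddr;
 *	bus_dmamap_t map;
 *
 *	error = bus_dmamem_alloc(dtag, &vaddr,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &map);
 *	if (error == 0) {
 *		... use the memory, then release it ...
 *		bus_dmamem_free(dtag, vaddr, map);
 *	}
 */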

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	pmap_change_attr((vm_offset_t)vaddr, dmat->maxsize, PAT_WRITE_BACK);
	if ((dmat->maxsize <= PAGE_SIZE) &&
	   (dmat->alignment < dmat->maxsize) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

int
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
    void *buf, bus_size_t buflen, int flags)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;

	if ((map != &nobounce_dmamap && map->pagesneeded == 0)) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
		    map, &nobounce_dmamap, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = (vm_offset_t)buf;
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			if (pmap)
				paddr = pmap_extract(pmap, vaddr);
			else
				paddr = pmap_kextract(vaddr);
			if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
			    run_filter(dmat, paddr) != 0) {
				map->pagesneeded++;
			}
			vaddr += (PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK));
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	return (0);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entry, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static __inline int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			pmap_t pmap,
			int flags,
			bus_addr_t *lastaddrp,
			bus_dma_segment_t *segs,
			int *segp,
			int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr;
	int seg, error;

	if (map == NULL)
		map = &nobounce_dmamap;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		error = _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags);
		if (error)
			return (error);
	}

	vaddr = (vm_offset_t)buf;
	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (sgsize > dmat->maxsegsz)
			sgsize = dmat->maxsegsz;
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr))
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	bus_addr_t		lastaddr = 0;
	int			error, nsegs = 0;

	if (map != NULL) {
		flags |= BUS_DMA_WAITOK;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
	     &lastaddr, dmat->segments, &nsegs, 1);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);

	if (error == EINPROGRESS) {
		return (error);
	}

	if (error)
		(*callback)(callback_arg, dmat->segments, 0, error);
	else
		(*callback)(callback_arg, dmat->segments, nsegs + 1, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}
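
/*
 * Editor's illustrative sketch (not part of the original file): a minimal
 * bus_dmamap_callback_t that records the address of the first (and only)
 * segment, suitable for a tag created with nsegments == 1.  The names are
 * hypothetical.
 *
 *	static void
 *	foo_map_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		if (error == 0)
 *			*(bus_addr_t *)arg = segs[0].ds_addr;
 *	}
 *
 *	bus_addr_t busaddr;
 *
 *	error = bus_dmamap_load(dtag, map, vaddr, DFLTPHYS,
 *	    foo_map_cb, &busaddr, BUS_DMA_NOWAIT);
 */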

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
static __inline int
_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			int flags)
{
	int error;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	*nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
						m->m_data, m->m_len,
						NULL, flags, &lastaddr,
						segs, nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	/* XXX FIXME: Having to increment nsegs is really annoying */
	++*nsegs;
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, *nsegs);
	return (error);
}

int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
	int nsegs, error;

	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, dmat->segments, &nsegs,
	    flags);

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
			    nsegs, m0->m_pkthdr.len, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs);
	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			int flags)
{
	return (_bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags));
}
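
/*
 * Editor's illustrative sketch (not part of the original file): network
 * drivers commonly use the _sg variant to translate an outgoing packet
 * chain into descriptor entries.  FOO_MAXSEGS and the other names are
 * hypothetical.
 *
 *	bus_dma_segment_t segs[FOO_MAXSEGS];
 *	int nsegs;
 *
 *	error = bus_dmamap_load_mbuf_sg(dtag, map, m0, segs, &nsegs,
 *	    BUS_DMA_NOWAIT);
 *	if (error == 0) {
 *		... write segs[0..nsegs-1] into the TX ring ...
 *	}
 */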

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	bus_addr_t lastaddr;
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	pmap_t pmap;

	flags |= BUS_DMA_NOWAIT;
	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = NULL;

	nsegs = 0;
	error = 0;
	first = 1;
	lastaddr = (bus_addr_t) 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map,
					addr, minlen, pmap, flags, &lastaddr,
					dmat->segments, &nsegs, first);
			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
			    nsegs+1, uio->uio_resid, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware.
		 */
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing bounce", __func__, dmat, dmat->flags, op);

		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			dmat->bounce_zone->total_bounced++;
		}
	}
}
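
/*
 * Editor's illustrative sketch (not part of the original file): the bounce
 * copies above rely on drivers bracketing each transfer with
 * bus_dmamap_sync() calls in the usual order, shown here for a transfer in
 * which the device first reads the buffer and later writes results back
 * (hypothetical names):
 *
 *	bus_dmamap_sync(dtag, map, BUS_DMASYNC_PREWRITE);
 *	... start the transfer; the device reads the bounced data ...
 *	... wait for the transfer to complete ...
 *	bus_dmamap_sync(dtag, map, BUS_DMASYNC_POSTREAD);
 *	... the CPU may now read what the device wrote ...
 */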

static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		 && (dmat->boundary <= bz->boundary)
		 && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = dmat->alignment;
	bz->boundary = dmat->boundary;
	bz->map_count = 0;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "boundary", CTLFLAG_RD, &bz->boundary, 0, "");

	return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 bz->lowaddr,
							 PAGE_SIZE,
							 bz->boundary);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL && map != &nobounce_dmamap,
	    ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
		/* page offset needs to be preserved */
		bpage->vaddr &= ~PAGE_MASK;
		bpage->busaddr &= ~PAGE_MASK;
		bpage->vaddr |= vaddr & PAGE_MASK;
		bpage->busaddr |= vaddr & PAGE_MASK;
	}
	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}