/*
 * busdma_machdep.c revision 32516
 *
 * Copyright (c) 1997 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      $Id$
 */
2832516Sgibbs
2932516Sgibbs#include <sys/param.h>
3032516Sgibbs#include <sys/systm.h>
3132516Sgibbs#include <sys/malloc.h>
3232516Sgibbs#include <sys/queue.h>
3332516Sgibbs
3432516Sgibbs#include <vm/vm.h>
3532516Sgibbs#include <vm/vm_prot.h>
3632516Sgibbs#include <vm/vm_page.h>
3732516Sgibbs
3832516Sgibbs#include <machine/bus.h>
3932516Sgibbs#include <machine/md_var.h>
4032516Sgibbs
/*
 * Classic min/max macros.  NOTE: both arguments may be evaluated twice,
 * so do not pass expressions with side effects.
 */
#define MAX(a,b) (((a) > (b)) ? (a) : (b))
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
/* Upper bound on bounce pages one map instance may add to the pool. */
#define MAX_BPAGES 128
4432516Sgibbs
/*
 * A DMA tag describes the DMA restrictions for a device.  Restrictions
 * are inherited from the parent tag chain (see bus_dma_tag_create()).
 * Per run_filter(), a physical address in (lowaddr, highaddr] must be
 * bounced unless the tag's filter function explicitly accepts it.
 */
struct bus_dma_tag {
	bus_dma_tag_t	  parent;	/* tag we inherit restrictions from */
	bus_size_t	  boundary;	/* boundary restriction (inherited;
					 * not otherwise enforced here) */
	bus_addr_t	  lowaddr;	/* start of the bounce window */
	bus_addr_t	  highaddr;	/* inclusive end of the bounce window */
	bus_dma_filter_t *filter;	/* optional override of the bounce
					 * decision for a given paddr */
	void		 *filterarg;	/* opaque argument for filter */
	bus_size_t	  maxsize;	/* largest mapping for this tag */
	int		  nsegments;	/* max S/G segments per mapping */
	bus_size_t	  maxsegsz;	/* max size of a single segment */
	int		  flags;
	int		  ref_count;	/* self + children referencing us */
	int		  map_count;	/* maps created against this tag */
};
5932516Sgibbs
/*
 * A bounce page: a page of DMA-reachable memory that client data is
 * staged through when the client's own pages fail a tag's address
 * restrictions (see add_bounce_page()/free_bounce_page()).
 */
struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

/* Set when deferred maps are queued for busdma_swi(). */
int busdma_swi_pending;

/*
 * Global bounce page pool and its accounting.  Manipulated at splhigh()
 * since free_bounce_page() may run from interrupt context.
 */
static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;		/* pages on bounce_page_list */
static int reserved_bpages;	/* reserved for a map but not yet in use */
static int active_bpages;	/* currently holding client data */
static int total_bpages;	/* total pages ever added to the pool */
/*
 * Lowest address restriction the pool satisfies.
 * NOTE(review): never updated in this revision after initialization;
 * bus_dma_tag_create() panics if a stricter lowaddr is requested.
 */
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;
7632516Sgibbs
/*
 * A dmamap tracks the bounce pages held by one mapping and, while the
 * map is waiting for page reservations, the parameters of its deferred
 * bus_dmamap_load() request.
 */
struct bus_dmamap {
	struct bp_list	       bpages;		/* bounce pages we own */
	int		       pagesneeded;	/* pages required for load */
	int		       pagesreserved;	/* pages reserved so far */
	bus_dma_tag_t	       dmat;		/* tag of deferred request */
	void		      *buf;		/* unmapped buffer pointer */
	bus_size_t	       buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;	/* deferred load callback */
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

/* Maps waiting for bounce pages, and maps ready for their callbacks. */
static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
/* Shared map used by loads against tags that never bounce. */
static struct bus_dmamap nobounce_dmamap;

static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map);
static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
9932516Sgibbs
10032516Sgibbsstatic __inline int
10132516Sgibbsrun_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
10232516Sgibbs{
10332516Sgibbs	int retval;
10432516Sgibbs
10532516Sgibbs	retval = 0;
10632516Sgibbs	do {
10732516Sgibbs		if (paddr > dmat->lowaddr
10832516Sgibbs		 && paddr <= dmat->highaddr
10932516Sgibbs		 && (dmat->filter == NULL
11032516Sgibbs		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
11132516Sgibbs			retval = 1;
11232516Sgibbs
11332516Sgibbs		dmat = dmat->parent;
11432516Sgibbs	} while (retval == 0 && dmat != NULL);
11532516Sgibbs	return (retval);
11632516Sgibbs}
11732516Sgibbs
11832516Sgibbs/*
11932516Sgibbs * Allocate a device specific dma_tag.
12032516Sgibbs */
12132516Sgibbsint
12232516Sgibbsbus_dma_tag_create(bus_dma_tag_t parent, bus_size_t boundary,
12332516Sgibbs		   bus_addr_t lowaddr, bus_addr_t highaddr,
12432516Sgibbs		   bus_dma_filter_t *filter, void *filterarg,
12532516Sgibbs		   bus_size_t maxsize, int nsegments, bus_size_t maxsegsz,
12632516Sgibbs		   int flags, bus_dma_tag_t *dmat)
12732516Sgibbs{
12832516Sgibbs	bus_dma_tag_t newtag;
12932516Sgibbs	int error = 0;
13032516Sgibbs
13132516Sgibbs	/* Return a NULL tag on failure */
13232516Sgibbs	*dmat = NULL;
13332516Sgibbs
13432516Sgibbs	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
13532516Sgibbs	if (newtag == NULL)
13632516Sgibbs		return (ENOMEM);
13732516Sgibbs
13832516Sgibbs	newtag->parent = parent;
13932516Sgibbs	newtag->boundary = boundary;
14032516Sgibbs	newtag->lowaddr = trunc_page(lowaddr) + (PAGE_SIZE - 1);
14132516Sgibbs	newtag->highaddr = trunc_page(highaddr) + (PAGE_SIZE - 1);
14232516Sgibbs	newtag->filter = filter;
14332516Sgibbs	newtag->filterarg = filterarg;
14432516Sgibbs	newtag->maxsize = maxsize;
14532516Sgibbs	newtag->nsegments = nsegments;
14632516Sgibbs	newtag->maxsegsz = maxsegsz;
14732516Sgibbs	newtag->flags = flags;
14832516Sgibbs	newtag->ref_count = 1; /* Count ourself */
14932516Sgibbs	newtag->map_count = 0;
15032516Sgibbs
15132516Sgibbs	/* Take into account any restrictions imposed by our parent tag */
15232516Sgibbs	if (parent != NULL) {
15332516Sgibbs		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
15432516Sgibbs		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
15532516Sgibbs		/*
15632516Sgibbs		 * XXX Not really correct??? Probably need to honor boundary
15732516Sgibbs		 *     all the way up the inheritence chain.
15832516Sgibbs		 */
15932516Sgibbs		newtag->boundary = MIN(parent->boundary, newtag->boundary);
16032516Sgibbs		if (newtag->filter == NULL) {
16132516Sgibbs			/*
16232516Sgibbs			 * Short circuit looking at our parent directly
16332516Sgibbs			 * since we have encapsulated all of it's information
16432516Sgibbs			 */
16532516Sgibbs			newtag->filter = parent->filter;
16632516Sgibbs			newtag->filterarg = parent->filterarg;
16732516Sgibbs			newtag->parent = parent->parent;
16832516Sgibbs		}
16932516Sgibbs		if (newtag->parent != NULL) {
17032516Sgibbs			parent->ref_count++;
17132516Sgibbs		}
17232516Sgibbs	}
17332516Sgibbs
17432516Sgibbs	if (newtag->lowaddr < ptoa(Maxmem)) {
17532516Sgibbs		/* Must bounce */
17632516Sgibbs
17732516Sgibbs		if (lowaddr > bounce_lowaddr) {
17832516Sgibbs			/*
17932516Sgibbs			 * Go through the pool and kill any pages
18032516Sgibbs			 * that don't reside below lowaddr.
18132516Sgibbs			 */
18232516Sgibbs			panic("bus_dmamap_create: page reallocation "
18332516Sgibbs			      "not implemented");
18432516Sgibbs		}
18532516Sgibbs		if (ptoa(total_bpages) < maxsize) {
18632516Sgibbs			int pages;
18732516Sgibbs
18832516Sgibbs			pages = atop(maxsize) - total_bpages;
18932516Sgibbs
19032516Sgibbs			/* Add pages to our bounce pool */
19132516Sgibbs			if (alloc_bounce_pages(newtag, pages) < pages)
19232516Sgibbs				error = ENOMEM;
19332516Sgibbs		}
19432516Sgibbs	}
19532516Sgibbs
19632516Sgibbs	if (error != 0) {
19732516Sgibbs		free(newtag, M_DEVBUF);
19832516Sgibbs	} else {
19932516Sgibbs		*dmat = newtag;
20032516Sgibbs	}
20132516Sgibbs	return (error);
20232516Sgibbs}
20332516Sgibbs
20432516Sgibbsint
20532516Sgibbsbus_dma_tag_destroy(bus_dma_tag_t dmat)
20632516Sgibbs{
20732516Sgibbs	if (dmat != NULL) {
20832516Sgibbs
20932516Sgibbs		if (dmat->map_count != 0)
21032516Sgibbs			return (EBUSY);
21132516Sgibbs
21232516Sgibbs		while (dmat != NULL) {
21332516Sgibbs			bus_dma_tag_t parent;
21432516Sgibbs
21532516Sgibbs			parent = dmat->parent;
21632516Sgibbs			dmat->ref_count--;
21732516Sgibbs			if (dmat->ref_count == 0) {
21832516Sgibbs				free(dmat, M_DEVBUF);
21932516Sgibbs			}
22032516Sgibbs			dmat = parent;
22132516Sgibbs		}
22232516Sgibbs	}
22332516Sgibbs	return (0);
22432516Sgibbs}
22532516Sgibbs
22632516Sgibbs/*
22732516Sgibbs * Allocate a handle for mapping from kva/uva/physical
22832516Sgibbs * address space into bus device space.
22932516Sgibbs */
23032516Sgibbsint
23132516Sgibbsbus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
23232516Sgibbs{
23332516Sgibbs	int error;
23432516Sgibbs
23532516Sgibbs	error = 0;
23632516Sgibbs
23732516Sgibbs	if (dmat->lowaddr < ptoa(Maxmem)) {
23832516Sgibbs		/* Must bounce */
23932516Sgibbs		int maxpages;
24032516Sgibbs
24132516Sgibbs		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
24232516Sgibbs					     M_NOWAIT);
24332516Sgibbs		if (*mapp == NULL) {
24432516Sgibbs			error = ENOMEM;
24532516Sgibbs		} else {
24632516Sgibbs			/* Initialize the new map */
24732516Sgibbs			bzero(*mapp, sizeof(**mapp));
24832516Sgibbs			STAILQ_INIT(&((*mapp)->bpages));
24932516Sgibbs		}
25032516Sgibbs		/*
25132516Sgibbs		 * Attempt to add pages to our pool on a per-instance
25232516Sgibbs		 * basis up to a sane limit.
25332516Sgibbs		 */
25432516Sgibbs		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
25532516Sgibbs		if (dmat->map_count > 0
25632516Sgibbs		 && total_bpages < maxpages) {
25732516Sgibbs			int pages;
25832516Sgibbs
25932516Sgibbs			pages = atop(dmat->maxsize);
26032516Sgibbs			pages = MIN(maxpages - total_bpages, pages);
26132516Sgibbs			alloc_bounce_pages(dmat, pages);
26232516Sgibbs		}
26332516Sgibbs	} else {
26432516Sgibbs		*mapp = NULL;
26532516Sgibbs	}
26632516Sgibbs	if (error == 0)
26732516Sgibbs		dmat->map_count++;
26832516Sgibbs	return (error);
26932516Sgibbs}
27032516Sgibbs
27132516Sgibbs/*
27232516Sgibbs * Destroy a handle for mapping from kva/uva/physical
27332516Sgibbs * address space into bus device space.
27432516Sgibbs */
27532516Sgibbsint
27632516Sgibbsbus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
27732516Sgibbs{
27832516Sgibbs	if (map != NULL) {
27932516Sgibbs		if (STAILQ_FIRST(&map->bpages) != NULL)
28032516Sgibbs			return (EBUSY);
28132516Sgibbs		free(map, M_DEVBUF);
28232516Sgibbs	}
28332516Sgibbs	dmat->map_count--;
28432516Sgibbs	return (0);
28532516Sgibbs}
28632516Sgibbs
28732516Sgibbs#define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1)
28832516Sgibbs
28932516Sgibbs/*
29032516Sgibbs * Map the buffer buf into bus space using the dmamap map.
29132516Sgibbs */
29232516Sgibbsint
29332516Sgibbsbus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
29432516Sgibbs		bus_size_t buflen, bus_dmamap_callback_t *callback,
29532516Sgibbs		void *callback_arg, int flags)
29632516Sgibbs{
29732516Sgibbs	vm_offset_t		vaddr;
29832516Sgibbs	vm_offset_t		paddr;
29932516Sgibbs#ifdef __GNUC__
30032516Sgibbs	bus_dma_segment_t	dm_segments[dmat->nsegments];
30132516Sgibbs#else
30232516Sgibbs	bus_dma_segment_t	dm_segments[BUS_DMAMAP_NSEGS];
30332516Sgibbs#endif
30432516Sgibbs	bus_dma_segment_t      *sg;
30532516Sgibbs	int			seg;
30632516Sgibbs	int			error;
30732516Sgibbs
30832516Sgibbs	error = 0;
30932516Sgibbs	/*
31032516Sgibbs	 * If we are being called during a callback, pagesneeded will
31132516Sgibbs	 * be non-zero, so we can avoid doing the work twice.
31232516Sgibbs	 */
31332516Sgibbs	if (dmat->lowaddr < ptoa(Maxmem) && map->pagesneeded == 0) {
31432516Sgibbs		vm_offset_t	vendaddr;
31532516Sgibbs
31632516Sgibbs		/*
31732516Sgibbs		 * Count the number of bounce pages
31832516Sgibbs		 * needed in order to complete this transfer
31932516Sgibbs		 */
32032516Sgibbs		vaddr = trunc_page(buf);
32132516Sgibbs		vendaddr = (vm_offset_t)buf + buflen;
32232516Sgibbs
32332516Sgibbs		while (vaddr < vendaddr) {
32432516Sgibbs			paddr = pmap_kextract(vaddr);
32532516Sgibbs			if (run_filter(dmat, paddr) != 0) {
32632516Sgibbs
32732516Sgibbs				map->pagesneeded++;
32832516Sgibbs			}
32932516Sgibbs			vaddr += PAGE_SIZE;
33032516Sgibbs		}
33132516Sgibbs	}
33232516Sgibbs
33332516Sgibbs	if (map == NULL)
33432516Sgibbs		map = &nobounce_dmamap;
33532516Sgibbs
33632516Sgibbs	/* Reserve Necessary Bounce Pages */
33732516Sgibbs	if (map->pagesneeded != 0) {
33832516Sgibbs		int s;
33932516Sgibbs
34032516Sgibbs		s = splhigh();
34132516Sgibbs	 	if (reserve_bounce_pages(dmat, map) != 0) {
34232516Sgibbs
34332516Sgibbs			/* Queue us for resources */
34432516Sgibbs			map->dmat = dmat;
34532516Sgibbs			map->buf = buf;
34632516Sgibbs			map->buflen = buflen;
34732516Sgibbs			map->callback = callback;
34832516Sgibbs			map->callback_arg = callback_arg;
34932516Sgibbs
35032516Sgibbs			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
35132516Sgibbs			splx(s);
35232516Sgibbs
35332516Sgibbs			return (EINPROGRESS);
35432516Sgibbs		}
35532516Sgibbs		splx(s);
35632516Sgibbs	}
35732516Sgibbs
35832516Sgibbs	vaddr = (vm_offset_t)buf;
35932516Sgibbs	sg = &dm_segments[0];
36032516Sgibbs	seg = 1;
36132516Sgibbs	sg->ds_len = 0;
36232516Sgibbs
36332516Sgibbs	do {
36432516Sgibbs		bus_size_t	size;
36532516Sgibbs		vm_offset_t	nextpaddr;
36632516Sgibbs
36732516Sgibbs		paddr = pmap_kextract(vaddr);
36832516Sgibbs		size = PAGE_SIZE - (paddr & PAGE_MASK);
36932516Sgibbs		if (size > buflen)
37032516Sgibbs			size = buflen;
37132516Sgibbs
37232516Sgibbs		if (map->pagesneeded != 0
37332516Sgibbs		 && run_filter(dmat, paddr)) {
37432516Sgibbs			paddr = add_bounce_page(dmat, map, vaddr, size);
37532516Sgibbs		}
37632516Sgibbs
37732516Sgibbs		if (sg->ds_len == 0) {
37832516Sgibbs			sg->ds_addr = paddr;
37932516Sgibbs			sg->ds_len = size;
38032516Sgibbs		} else if (paddr == nextpaddr) {
38132516Sgibbs			sg->ds_len += size;
38232516Sgibbs		} else {
38332516Sgibbs			/* Go to the next segment */
38432516Sgibbs			sg++;
38532516Sgibbs			seg++;
38632516Sgibbs			if (seg > dmat->nsegments)
38732516Sgibbs				break;
38832516Sgibbs			sg->ds_addr = paddr;
38932516Sgibbs			sg->ds_len = size;
39032516Sgibbs		}
39132516Sgibbs		vaddr += size;
39232516Sgibbs		nextpaddr = paddr + size;
39332516Sgibbs		buflen -= size;
39432516Sgibbs	} while (buflen > 0);
39532516Sgibbs
39632516Sgibbs	if (buflen != 0) {
39732516Sgibbs		printf("bus_dmamap_load: Too many segs!\n");
39832516Sgibbs		error = EFBIG;
39932516Sgibbs	}
40032516Sgibbs
40132516Sgibbs	(*callback)(callback_arg, dm_segments, seg, error);
40232516Sgibbs
40332516Sgibbs	return (0);
40432516Sgibbs}
40532516Sgibbs
40632516Sgibbs/*
40732516Sgibbs * Release the mapping held by map.
40832516Sgibbs */
40932516Sgibbsvoid
41032516Sgibbs_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
41132516Sgibbs{
41232516Sgibbs	struct bounce_page *bpage;
41332516Sgibbs
41432516Sgibbs	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
41532516Sgibbs		STAILQ_REMOVE_HEAD(&map->bpages, links);
41632516Sgibbs		free_bounce_page(dmat, bpage);
41732516Sgibbs	}
41832516Sgibbs}
41932516Sgibbs
42032516Sgibbsvoid
42132516Sgibbs_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
42232516Sgibbs{
42332516Sgibbs	struct bounce_page *bpage;
42432516Sgibbs
42532516Sgibbs	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
42632516Sgibbs
42732516Sgibbs		/*
42832516Sgibbs		 * Handle data bouncing.  We might also
42932516Sgibbs		 * want to add support for invalidating
43032516Sgibbs		 * the caches on broken hardware
43132516Sgibbs		 */
43232516Sgibbs		switch (op) {
43332516Sgibbs		case BUS_DMASYNC_PREWRITE:
43432516Sgibbs			while (bpage != NULL) {
43532516Sgibbs				bcopy((void *)bpage->datavaddr,
43632516Sgibbs				      (void *)bpage->vaddr,
43732516Sgibbs				      bpage->datacount);
43832516Sgibbs				bpage = STAILQ_NEXT(bpage, links);
43932516Sgibbs			}
44032516Sgibbs			break;
44132516Sgibbs
44232516Sgibbs		case BUS_DMASYNC_POSTREAD:
44332516Sgibbs			while (bpage != NULL) {
44432516Sgibbs				bcopy((void *)bpage->vaddr,
44532516Sgibbs				      (void *)bpage->datavaddr,
44632516Sgibbs				      bpage->datacount);
44732516Sgibbs				bpage = STAILQ_NEXT(bpage, links);
44832516Sgibbs			}
44932516Sgibbs			break;
45032516Sgibbs		case BUS_DMASYNC_PREREAD:
45132516Sgibbs		case BUS_DMASYNC_POSTWRITE:
45232516Sgibbs			/* No-ops */
45332516Sgibbs			break;
45432516Sgibbs		}
45532516Sgibbs	}
45632516Sgibbs}
45732516Sgibbs
45832516Sgibbsstatic int
45932516Sgibbsalloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
46032516Sgibbs{
46132516Sgibbs	int count;
46232516Sgibbs
46332516Sgibbs	count = 0;
46432516Sgibbs	if (total_bpages == 0) {
46532516Sgibbs		STAILQ_INIT(&bounce_page_list);
46632516Sgibbs		STAILQ_INIT(&bounce_map_waitinglist);
46732516Sgibbs		STAILQ_INIT(&bounce_map_callbacklist);
46832516Sgibbs	}
46932516Sgibbs
47032516Sgibbs	while (numpages > 0) {
47132516Sgibbs		struct bounce_page *bpage;
47232516Sgibbs		int s;
47332516Sgibbs
47432516Sgibbs		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
47532516Sgibbs						     M_NOWAIT);
47632516Sgibbs
47732516Sgibbs		if (bpage == NULL)
47832516Sgibbs			break;
47932516Sgibbs		bzero(bpage, sizeof(*bpage));
48032516Sgibbs		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
48132516Sgibbs							 M_NOWAIT, 0ul,
48232516Sgibbs							 dmat->lowaddr,
48332516Sgibbs							 PAGE_SIZE, 0x10000);
48432516Sgibbs		if (bpage->vaddr == NULL) {
48532516Sgibbs			free(bpage, M_DEVBUF);
48632516Sgibbs			break;
48732516Sgibbs		}
48832516Sgibbs		bpage->busaddr = pmap_kextract(bpage->vaddr);
48932516Sgibbs		s = splhigh();
49032516Sgibbs		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
49132516Sgibbs		total_bpages++;
49232516Sgibbs		free_bpages++;
49332516Sgibbs		splx(s);
49432516Sgibbs		count++;
49532516Sgibbs		numpages--;
49632516Sgibbs	}
49732516Sgibbs	return (count);
49832516Sgibbs}
49932516Sgibbs
50032516Sgibbsstatic int
50132516Sgibbsreserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
50232516Sgibbs{
50332516Sgibbs	int pages;
50432516Sgibbs
50532516Sgibbs	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
50632516Sgibbs	free_bpages -= pages;
50732516Sgibbs	reserved_bpages += pages;
50832516Sgibbs	map->pagesreserved += pages;
50932516Sgibbs	pages = map->pagesneeded - map->pagesreserved;
51032516Sgibbs
51132516Sgibbs	return (pages);
51232516Sgibbs}
51332516Sgibbs
51432516Sgibbsstatic vm_offset_t
51532516Sgibbsadd_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
51632516Sgibbs		bus_size_t size)
51732516Sgibbs{
51832516Sgibbs	int s;
51932516Sgibbs	struct bounce_page *bpage;
52032516Sgibbs
52132516Sgibbs	if (map->pagesneeded == 0)
52232516Sgibbs		panic("add_bounce_page: map doesn't need any pages");
52332516Sgibbs	map->pagesneeded--;
52432516Sgibbs
52532516Sgibbs	if (map->pagesreserved == 0)
52632516Sgibbs		panic("add_bounce_page: map doesn't need any pages");
52732516Sgibbs	map->pagesreserved--;
52832516Sgibbs
52932516Sgibbs	s = splhigh();
53032516Sgibbs	bpage = STAILQ_FIRST(&bounce_page_list);
53132516Sgibbs	if (bpage == NULL)
53232516Sgibbs		panic("add_bounce_page: free page list is empty");
53332516Sgibbs
53432516Sgibbs	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
53532516Sgibbs	reserved_bpages--;
53632516Sgibbs	active_bpages++;
53732516Sgibbs	splx(s);
53832516Sgibbs
53932516Sgibbs	bpage->datavaddr = vaddr;
54032516Sgibbs	bpage->datacount = size;
54132516Sgibbs	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
54232516Sgibbs	return (bpage->busaddr);
54332516Sgibbs}
54432516Sgibbs
/*
 * Return a bounce page to the free pool.  If a map is waiting for
 * pages, try to complete its reservation; once fully reserved, move it
 * to the callback list and schedule the software interrupt that will
 * restart its deferred bus_dmamap_load() (see busdma_swi()).
 */
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	int s;
	struct bus_dmamap *map;

	/* Disassociate the client data from this page. */
	bpage->datavaddr = 0;
	bpage->datacount = 0;

	s = splhigh();
	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		/* Zero shortfall means the map is now fully reserved. */
		if (reserve_bounce_pages(map->dmat, map) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			setsoftvm();
		}
	}
	splx(s);
}
56932516Sgibbs
57032516Sgibbsvoid
57132516Sgibbsbusdma_swi()
57232516Sgibbs{
57332516Sgibbs	int s;
57432516Sgibbs	struct bus_dmamap *map;
57532516Sgibbs
57632516Sgibbs	s = splhigh();
57732516Sgibbs	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
57832516Sgibbs		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
57932516Sgibbs		splx(s);
58032516Sgibbs		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
58132516Sgibbs				map->callback, map->callback_arg, /*flags*/0);
58232516Sgibbs		s = splhigh();
58332516Sgibbs	}
58432516Sgibbs	splx(s);
58532516Sgibbs}
586