busdma_machdep.c revision 41764
132516Sgibbs/*
240029Sgibbs * Copyright (c) 1997, 1998 Justin T. Gibbs.
332516Sgibbs * All rights reserved.
432516Sgibbs *
532516Sgibbs * Redistribution and use in source and binary forms, with or without
632516Sgibbs * modification, are permitted provided that the following conditions
732516Sgibbs * are met:
832516Sgibbs * 1. Redistributions of source code must retain the above copyright
932516Sgibbs *    notice, this list of conditions, and the following disclaimer,
1032516Sgibbs *    without modification, immediately at the beginning of the file.
1132516Sgibbs * 2. The name of the author may not be used to endorse or promote products
1232516Sgibbs *    derived from this software without specific prior written permission.
1332516Sgibbs *
1432516Sgibbs * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
1532516Sgibbs * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1632516Sgibbs * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1732516Sgibbs * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
1832516Sgibbs * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
1932516Sgibbs * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2032516Sgibbs * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2132516Sgibbs * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2232516Sgibbs * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2332516Sgibbs * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2432516Sgibbs * SUCH DAMAGE.
2532516Sgibbs *
2641764Sdillon *      $Id: busdma_machdep.c,v 1.11 1998/10/13 08:24:33 dg Exp $
2732516Sgibbs */
2832516Sgibbs
2932516Sgibbs#include <sys/param.h>
3032516Sgibbs#include <sys/systm.h>
3132516Sgibbs#include <sys/malloc.h>
3232516Sgibbs
3332516Sgibbs#include <vm/vm.h>
3432516Sgibbs#include <vm/vm_prot.h>
3532516Sgibbs#include <vm/vm_page.h>
3632516Sgibbs
3732516Sgibbs#include <machine/bus.h>
3832516Sgibbs#include <machine/md_var.h>
3932516Sgibbs
/*
 * Local min/max helpers used when merging parent/child tag limits.
 * Classic double-evaluation macros: do not pass expressions with
 * side effects.
 */
#define MAX(a,b) (((a) > (b)) ? (a) : (b))
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
/* Hard cap on the size of the global bounce-page pool. */
#define MAX_BPAGES 128
4332516Sgibbs
/*
 * A DMA tag describes the constraints a device places on DMA
 * transfers.  Tags form a chain through `parent`; run_filter()
 * walks this chain when deciding whether an address must bounce.
 */
struct bus_dma_tag {
	bus_dma_tag_t	  parent;	/* inherited constraints, or NULL */
	bus_size_t	  alignment;
	bus_size_t	  boundary;	/* merged with parent's at create */
	bus_addr_t	  lowaddr;	/* addresses in (lowaddr, highaddr] */
	bus_addr_t	  highaddr;	/*  are subject to the filter test */
	bus_dma_filter_t *filter;	/* optional per-address veto callback */
	void		 *filterarg;
	bus_size_t	  maxsize;	/* largest mapping; sizes bounce pool */
	u_int		  nsegments;	/* max segments built by dmamap_load */
	bus_size_t	  maxsegsz;
	int		  flags;	/* BUS_DMA_* incl. MIN_ALLOC_COMP */
	int		  ref_count;	/* self + references from children */
	int		  map_count;	/* outstanding maps; blocks destroy */
};
5932516Sgibbs
/*
 * One page of the bounce pool.  While active it shadows a page of
 * client data; _bus_dmamap_sync() copies between vaddr and datavaddr.
 */
struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};
6732516Sgibbs
/* Set when deferred maps are queued; cleared when busdma_swi() runs. */
int busdma_swi_pending;

/* Global bounce-page pool and its accounting counters. */
static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;		/* pages sitting on bounce_page_list */
static int reserved_bpages;	/* pages promised to maps, not yet in use */
static int active_bpages;	/* pages currently shadowing client data */
static int total_bpages;	/* pool size = free + reserved + active */
/* Strictest low address the current pool satisfies. */
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

struct bus_dmamap {
	struct bp_list	       bpages;		/* bounce pages owned by map */
	int		       pagesneeded;
	int		       pagesreserved;
	/*
	 * The remaining fields record a deferred bus_dmamap_load()
	 * request so busdma_swi() can retry it once pages free up.
	 */
	bus_dma_tag_t	       dmat;
	void		      *buf;		/* unmapped buffer pointer */
	bus_size_t	       buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

/* Maps waiting for bounce pages, and maps whose callback is now due. */
static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
/* Stand-in map used by bus_dmamap_load() when a NULL map is passed. */
static struct bus_dmamap nobounce_dmamap;

static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map);
static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
9832516Sgibbsstatic __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
9932516Sgibbs
10032516Sgibbsstatic __inline int
10132516Sgibbsrun_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
10232516Sgibbs{
10332516Sgibbs	int retval;
10432516Sgibbs
10532516Sgibbs	retval = 0;
10632516Sgibbs	do {
10732516Sgibbs		if (paddr > dmat->lowaddr
10832516Sgibbs		 && paddr <= dmat->highaddr
10932516Sgibbs		 && (dmat->filter == NULL
11032516Sgibbs		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
11132516Sgibbs			retval = 1;
11232516Sgibbs
11332516Sgibbs		dmat = dmat->parent;
11432516Sgibbs	} while (retval == 0 && dmat != NULL);
11532516Sgibbs	return (retval);
11632516Sgibbs}
11732516Sgibbs
11835767Sgibbs#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
11932516Sgibbs/*
12032516Sgibbs * Allocate a device specific dma_tag.
12132516Sgibbs */
12232516Sgibbsint
12335767Sgibbsbus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
12435767Sgibbs		   bus_size_t boundary, bus_addr_t lowaddr,
12535767Sgibbs		   bus_addr_t highaddr, bus_dma_filter_t *filter,
12635767Sgibbs		   void *filterarg, bus_size_t maxsize, int nsegments,
12735767Sgibbs		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
12832516Sgibbs{
12932516Sgibbs	bus_dma_tag_t newtag;
13032516Sgibbs	int error = 0;
13132516Sgibbs
13232516Sgibbs	/* Return a NULL tag on failure */
13332516Sgibbs	*dmat = NULL;
13432516Sgibbs
13532516Sgibbs	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
13632516Sgibbs	if (newtag == NULL)
13732516Sgibbs		return (ENOMEM);
13832516Sgibbs
13932516Sgibbs	newtag->parent = parent;
14032516Sgibbs	newtag->boundary = boundary;
14140286Sdg	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
14240286Sdg	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
14332516Sgibbs	newtag->filter = filter;
14432516Sgibbs	newtag->filterarg = filterarg;
14532516Sgibbs	newtag->maxsize = maxsize;
14632516Sgibbs	newtag->nsegments = nsegments;
14732516Sgibbs	newtag->maxsegsz = maxsegsz;
14832516Sgibbs	newtag->flags = flags;
14932516Sgibbs	newtag->ref_count = 1; /* Count ourself */
15032516Sgibbs	newtag->map_count = 0;
15132516Sgibbs
15232516Sgibbs	/* Take into account any restrictions imposed by our parent tag */
15332516Sgibbs	if (parent != NULL) {
15432516Sgibbs		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
15532516Sgibbs		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
15632516Sgibbs		/*
15732516Sgibbs		 * XXX Not really correct??? Probably need to honor boundary
15832516Sgibbs		 *     all the way up the inheritence chain.
15932516Sgibbs		 */
16035767Sgibbs		newtag->boundary = MAX(parent->boundary, newtag->boundary);
16132516Sgibbs		if (newtag->filter == NULL) {
16232516Sgibbs			/*
16332516Sgibbs			 * Short circuit looking at our parent directly
16435256Sdes			 * since we have encapsulated all of its information
16532516Sgibbs			 */
16632516Sgibbs			newtag->filter = parent->filter;
16732516Sgibbs			newtag->filterarg = parent->filterarg;
16832516Sgibbs			newtag->parent = parent->parent;
16932516Sgibbs		}
17032516Sgibbs		if (newtag->parent != NULL) {
17132516Sgibbs			parent->ref_count++;
17232516Sgibbs		}
17332516Sgibbs	}
17432516Sgibbs
17535767Sgibbs	if (newtag->lowaddr < ptoa(Maxmem) && (flags & BUS_DMA_ALLOCNOW) != 0) {
17632516Sgibbs		/* Must bounce */
17732516Sgibbs
17832516Sgibbs		if (lowaddr > bounce_lowaddr) {
17932516Sgibbs			/*
18032516Sgibbs			 * Go through the pool and kill any pages
18132516Sgibbs			 * that don't reside below lowaddr.
18232516Sgibbs			 */
18335767Sgibbs			panic("bus_dma_tag_create: page reallocation "
18432516Sgibbs			      "not implemented");
18532516Sgibbs		}
18632516Sgibbs		if (ptoa(total_bpages) < maxsize) {
18732516Sgibbs			int pages;
18832516Sgibbs
18932516Sgibbs			pages = atop(maxsize) - total_bpages;
19032516Sgibbs
19132516Sgibbs			/* Add pages to our bounce pool */
19232516Sgibbs			if (alloc_bounce_pages(newtag, pages) < pages)
19332516Sgibbs				error = ENOMEM;
19432516Sgibbs		}
19535767Sgibbs		/* Performed initial allocation */
19635767Sgibbs		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
19732516Sgibbs	}
19832516Sgibbs
19932516Sgibbs	if (error != 0) {
20032516Sgibbs		free(newtag, M_DEVBUF);
20132516Sgibbs	} else {
20232516Sgibbs		*dmat = newtag;
20332516Sgibbs	}
20432516Sgibbs	return (error);
20532516Sgibbs}
20632516Sgibbs
20732516Sgibbsint
20832516Sgibbsbus_dma_tag_destroy(bus_dma_tag_t dmat)
20932516Sgibbs{
21032516Sgibbs	if (dmat != NULL) {
21132516Sgibbs
21232516Sgibbs		if (dmat->map_count != 0)
21332516Sgibbs			return (EBUSY);
21432516Sgibbs
21532516Sgibbs		while (dmat != NULL) {
21632516Sgibbs			bus_dma_tag_t parent;
21732516Sgibbs
21832516Sgibbs			parent = dmat->parent;
21932516Sgibbs			dmat->ref_count--;
22032516Sgibbs			if (dmat->ref_count == 0) {
22132516Sgibbs				free(dmat, M_DEVBUF);
22240029Sgibbs				/*
22340029Sgibbs				 * Last reference count, so
22440029Sgibbs				 * release our reference
22540029Sgibbs				 * count on our parent.
22640029Sgibbs				 */
22740029Sgibbs				dmat = parent;
22840029Sgibbs			} else
22940029Sgibbs				dmat = NULL;
23032516Sgibbs		}
23132516Sgibbs	}
23232516Sgibbs	return (0);
23332516Sgibbs}
23432516Sgibbs
/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 *
 * Tags that can address all of physical memory get a NULL map (no
 * bouncing ever required).  Otherwise a map is allocated and the
 * bounce pool is topped up, on demand, toward a per-tag sane limit.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->lowaddr < ptoa(Maxmem)) {
		/* Must bounce */
		int maxpages;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT);
		if (*mapp == NULL) {
			return (ENOMEM);
		} else {
			/* Initialize the new map */
			bzero(*mapp, sizeof(**mapp));
			STAILQ_INIT(&((*mapp)->bpages));
		}
		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0
		  && total_bpages < maxpages)) {
			int pages;

			if (dmat->lowaddr > bounce_lowaddr) {
				/*
				 * Go through the pool and kill any pages
				 * that don't reside below lowaddr.
				 */
				panic("bus_dmamap_create: page reallocation "
				      "not implemented");
			}
			pages = atop(dmat->maxsize);
			pages = MIN(maxpages - total_bpages, pages);
			error = alloc_bounce_pages(dmat, pages);

			/*
			 * Allocation failure is only fatal for the tag's
			 * first (minimum) allocation; later top-ups are
			 * best-effort.
			 */
			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
			/*
			 * NOTE(review): on a non-zero return *mapp is left
			 * allocated and map_count is not bumped — confirm
			 * callers never reuse/free the map on error, else
			 * this leaks the map structure.
			 */
		}
	} else {
		/* No bouncing needed; NULL tells dmamap_load to use
		 * the shared nobounce map. */
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	return (error);
}
29532516Sgibbs
29632516Sgibbs/*
29732516Sgibbs * Destroy a handle for mapping from kva/uva/physical
29832516Sgibbs * address space into bus device space.
29932516Sgibbs */
30032516Sgibbsint
30132516Sgibbsbus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
30232516Sgibbs{
30332516Sgibbs	if (map != NULL) {
30432516Sgibbs		if (STAILQ_FIRST(&map->bpages) != NULL)
30532516Sgibbs			return (EBUSY);
30632516Sgibbs		free(map, M_DEVBUF);
30732516Sgibbs	}
30832516Sgibbs	dmat->map_count--;
30932516Sgibbs	return (0);
31032516Sgibbs}
31132516Sgibbs
31235767Sgibbs
31335767Sgibbs/*
31435767Sgibbs * Allocate a piece of memory that can be efficiently mapped into
31535767Sgibbs * bus device space based on the constraints lited in the dma tag.
31635767Sgibbs * A dmamap to for use with dmamap_load is also allocated.
31735767Sgibbs */
31835767Sgibbsint
31935767Sgibbsbus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
32035767Sgibbs		 bus_dmamap_t *mapp)
32135767Sgibbs{
32235767Sgibbs	/* If we succeed, no mapping/bouncing will be required */
32340029Sgibbs	*mapp = NULL;
32435767Sgibbs
32535767Sgibbs	if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem)) {
32635767Sgibbs		*vaddr = malloc(dmat->maxsize, M_DEVBUF,
32735767Sgibbs				(flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
32835767Sgibbs	} else {
32935767Sgibbs		/*
33035767Sgibbs		 * XXX Use Contigmalloc until it is merged into this facility
33135767Sgibbs		 *     and handles multi-seg allocations.  Nobody is doing
33235767Sgibbs		 *     multi-seg allocations yet though.
33335767Sgibbs		 */
33435767Sgibbs		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF,
33535767Sgibbs				      (flags & BUS_DMA_NOWAIT)
33635767Sgibbs				      ? M_NOWAIT : M_WAITOK,
33735767Sgibbs				      0ul, dmat->lowaddr, 1ul, dmat->boundary);
33835767Sgibbs	}
33935767Sgibbs	if (*vaddr == NULL)
34035767Sgibbs		return (ENOMEM);
34135767Sgibbs	return (0);
34235767Sgibbs}
34335767Sgibbs
34435767Sgibbs/*
34535767Sgibbs * Free a piece of memory and it's allociated dmamap, that was allocated
34635767Sgibbs * via bus_dmamem_alloc.
34735767Sgibbs */
34835767Sgibbsvoid
34935767Sgibbsbus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
35035767Sgibbs{
35135767Sgibbs	/*
35235767Sgibbs	 * dmamem does not need to be bounced, so the map should be
35335767Sgibbs	 * NULL
35435767Sgibbs	 */
35539243Sgibbs	if (map != &nobounce_dmamap)
35635767Sgibbs		panic("bus_dmamem_free: Invalid map freed\n");
35740029Sgibbs	/* XXX There is no "contigfree" and "free" doesn't work */
35840029Sgibbs	if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem))
35940029Sgibbs		free(vaddr, M_DEVBUF);
36035767Sgibbs}
36135767Sgibbs
/* Worst case: one segment per page of the largest possible request. */
#define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
 *
 * Builds the physical segment list on the stack and delivers it via
 * the callback.  If bounce pages are needed but unavailable, the
 * request is queued and EINPROGRESS is returned; busdma_swi() will
 * retry the load later.  Note that an over-long buffer is reported
 * through the callback's error argument (EFBIG), while the function
 * itself still returns 0.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	vm_offset_t		vaddr;
	vm_offset_t		paddr;
#ifdef __GNUC__
	bus_dma_segment_t	dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t	dm_segments[BUS_DMAMAP_NSEGS];
#endif
	bus_dma_segment_t      *sg;
	int			seg;
	int			error;

	/* A NULL map means the tag never requires bouncing. */
	if (map == NULL)
		map = &nobounce_dmamap;

	error = 0;
	/*
	 * If we are being called during a callback, pagesneeded will
	 * be non-zero, so we can avoid doing the work twice.
	 */
	if (dmat->lowaddr < ptoa(Maxmem) && map->pagesneeded == 0) {
		vm_offset_t	vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0) {

				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		int s;

		s = splhigh();
	 	if (reserve_bounce_pages(dmat, map) != 0) {

			/* Queue us for resources */
			map->dmat = dmat;
			map->buf = buf;
			map->buflen = buflen;
			map->callback = callback;
			map->callback_arg = callback_arg;

			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
			splx(s);

			return (EINPROGRESS);
		}
		splx(s);
	}

	vaddr = (vm_offset_t)buf;
	sg = &dm_segments[0];
	seg = 1;
	sg->ds_len = 0;

	{
		/*
		 * note: nextpaddr not used on first loop
		 */
		vm_offset_t	nextpaddr = 0;

		do {
			bus_size_t	size;

			/* Bytes remaining on this physical page. */
			paddr = pmap_kextract(vaddr);
			size = PAGE_SIZE - (paddr & PAGE_MASK);
			if (size > buflen)
				size = buflen;

			if (map->pagesneeded != 0
			 && run_filter(dmat, paddr)) {
				/* Redirect this chunk through a bounce page. */
				paddr = add_bounce_page(dmat, map,
				    vaddr, size);
			}

			if (sg->ds_len == 0) {
				/* First chunk opens the first segment. */
				sg->ds_addr = paddr;
				sg->ds_len = size;
			} else if (paddr == nextpaddr) {
				/* Physically contiguous: coalesce.
				 * NOTE(review): neither dmat->maxsegsz nor
				 * dmat->boundary is consulted here — confirm
				 * no current consumer relies on them. */
				sg->ds_len += size;
			} else {
				/* Go to the next segment */
				sg++;
				seg++;
				if (seg > dmat->nsegments)
					break;
				sg->ds_addr = paddr;
				sg->ds_len = size;
			}
			vaddr += size;
			nextpaddr = paddr + size;
			buflen -= size;
		} while (buflen > 0);
	}

	/* buflen is only non-zero here if we ran out of segments. */
	if (buflen != 0) {
		printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
		       (u_long)buflen);
		error = EFBIG;
	}

	(*callback)(callback_arg, dm_segments, seg, error);

	return (0);
}
48832516Sgibbs
48932516Sgibbs/*
49032516Sgibbs * Release the mapping held by map.
49132516Sgibbs */
49232516Sgibbsvoid
49332516Sgibbs_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
49432516Sgibbs{
49532516Sgibbs	struct bounce_page *bpage;
49632516Sgibbs
49732516Sgibbs	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
49832516Sgibbs		STAILQ_REMOVE_HEAD(&map->bpages, links);
49932516Sgibbs		free_bounce_page(dmat, bpage);
50032516Sgibbs	}
50132516Sgibbs}
50232516Sgibbs
50332516Sgibbsvoid
50432516Sgibbs_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
50532516Sgibbs{
50632516Sgibbs	struct bounce_page *bpage;
50732516Sgibbs
50832516Sgibbs	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
50932516Sgibbs
51032516Sgibbs		/*
51132516Sgibbs		 * Handle data bouncing.  We might also
51232516Sgibbs		 * want to add support for invalidating
51332516Sgibbs		 * the caches on broken hardware
51432516Sgibbs		 */
51532516Sgibbs		switch (op) {
51632516Sgibbs		case BUS_DMASYNC_PREWRITE:
51732516Sgibbs			while (bpage != NULL) {
51832516Sgibbs				bcopy((void *)bpage->datavaddr,
51932516Sgibbs				      (void *)bpage->vaddr,
52032516Sgibbs				      bpage->datacount);
52132516Sgibbs				bpage = STAILQ_NEXT(bpage, links);
52232516Sgibbs			}
52332516Sgibbs			break;
52432516Sgibbs
52532516Sgibbs		case BUS_DMASYNC_POSTREAD:
52632516Sgibbs			while (bpage != NULL) {
52732516Sgibbs				bcopy((void *)bpage->vaddr,
52832516Sgibbs				      (void *)bpage->datavaddr,
52932516Sgibbs				      bpage->datacount);
53032516Sgibbs				bpage = STAILQ_NEXT(bpage, links);
53132516Sgibbs			}
53232516Sgibbs			break;
53332516Sgibbs		case BUS_DMASYNC_PREREAD:
53432516Sgibbs		case BUS_DMASYNC_POSTWRITE:
53532516Sgibbs			/* No-ops */
53632516Sgibbs			break;
53732516Sgibbs		}
53832516Sgibbs	}
53932516Sgibbs}
54032516Sgibbs
54132516Sgibbsstatic int
54232516Sgibbsalloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
54332516Sgibbs{
54432516Sgibbs	int count;
54532516Sgibbs
54632516Sgibbs	count = 0;
54732516Sgibbs	if (total_bpages == 0) {
54832516Sgibbs		STAILQ_INIT(&bounce_page_list);
54932516Sgibbs		STAILQ_INIT(&bounce_map_waitinglist);
55032516Sgibbs		STAILQ_INIT(&bounce_map_callbacklist);
55132516Sgibbs	}
55232516Sgibbs
55332516Sgibbs	while (numpages > 0) {
55432516Sgibbs		struct bounce_page *bpage;
55532516Sgibbs		int s;
55632516Sgibbs
55732516Sgibbs		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
55832516Sgibbs						     M_NOWAIT);
55932516Sgibbs
56032516Sgibbs		if (bpage == NULL)
56132516Sgibbs			break;
56232516Sgibbs		bzero(bpage, sizeof(*bpage));
56332516Sgibbs		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
56432516Sgibbs							 M_NOWAIT, 0ul,
56532516Sgibbs							 dmat->lowaddr,
56635767Sgibbs							 PAGE_SIZE,
56735767Sgibbs							 0);
56832516Sgibbs		if (bpage->vaddr == NULL) {
56932516Sgibbs			free(bpage, M_DEVBUF);
57032516Sgibbs			break;
57132516Sgibbs		}
57232516Sgibbs		bpage->busaddr = pmap_kextract(bpage->vaddr);
57332516Sgibbs		s = splhigh();
57432516Sgibbs		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
57532516Sgibbs		total_bpages++;
57632516Sgibbs		free_bpages++;
57732516Sgibbs		splx(s);
57832516Sgibbs		count++;
57932516Sgibbs		numpages--;
58032516Sgibbs	}
58132516Sgibbs	return (count);
58232516Sgibbs}
58332516Sgibbs
58432516Sgibbsstatic int
58532516Sgibbsreserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
58632516Sgibbs{
58732516Sgibbs	int pages;
58832516Sgibbs
58932516Sgibbs	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
59032516Sgibbs	free_bpages -= pages;
59132516Sgibbs	reserved_bpages += pages;
59232516Sgibbs	map->pagesreserved += pages;
59332516Sgibbs	pages = map->pagesneeded - map->pagesreserved;
59432516Sgibbs
59532516Sgibbs	return (pages);
59632516Sgibbs}
59732516Sgibbs
59832516Sgibbsstatic vm_offset_t
59932516Sgibbsadd_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
60032516Sgibbs		bus_size_t size)
60132516Sgibbs{
60232516Sgibbs	int s;
60332516Sgibbs	struct bounce_page *bpage;
60432516Sgibbs
60532516Sgibbs	if (map->pagesneeded == 0)
60632516Sgibbs		panic("add_bounce_page: map doesn't need any pages");
60732516Sgibbs	map->pagesneeded--;
60832516Sgibbs
60932516Sgibbs	if (map->pagesreserved == 0)
61032516Sgibbs		panic("add_bounce_page: map doesn't need any pages");
61132516Sgibbs	map->pagesreserved--;
61232516Sgibbs
61332516Sgibbs	s = splhigh();
61432516Sgibbs	bpage = STAILQ_FIRST(&bounce_page_list);
61532516Sgibbs	if (bpage == NULL)
61632516Sgibbs		panic("add_bounce_page: free page list is empty");
61732516Sgibbs
61832516Sgibbs	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
61932516Sgibbs	reserved_bpages--;
62032516Sgibbs	active_bpages++;
62132516Sgibbs	splx(s);
62232516Sgibbs
62332516Sgibbs	bpage->datavaddr = vaddr;
62432516Sgibbs	bpage->datacount = size;
62532516Sgibbs	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
62632516Sgibbs	return (bpage->busaddr);
62732516Sgibbs}
62832516Sgibbs
62932516Sgibbsstatic void
63032516Sgibbsfree_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
63132516Sgibbs{
63232516Sgibbs	int s;
63332516Sgibbs	struct bus_dmamap *map;
63432516Sgibbs
63532516Sgibbs	bpage->datavaddr = 0;
63632516Sgibbs	bpage->datacount = 0;
63732516Sgibbs
63832516Sgibbs	s = splhigh();
63932516Sgibbs	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
64032516Sgibbs	free_bpages++;
64132516Sgibbs	active_bpages--;
64232516Sgibbs	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
64332516Sgibbs		if (reserve_bounce_pages(map->dmat, map) == 0) {
64432516Sgibbs			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
64532516Sgibbs			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
64632516Sgibbs					   map, links);
64732516Sgibbs			busdma_swi_pending = 1;
64832516Sgibbs			setsoftvm();
64932516Sgibbs		}
65032516Sgibbs	}
65132516Sgibbs	splx(s);
65232516Sgibbs}
65332516Sgibbs
65432516Sgibbsvoid
65532516Sgibbsbusdma_swi()
65632516Sgibbs{
65732516Sgibbs	int s;
65832516Sgibbs	struct bus_dmamap *map;
65932516Sgibbs
66032516Sgibbs	s = splhigh();
66132516Sgibbs	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
66232516Sgibbs		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
66332516Sgibbs		splx(s);
66432516Sgibbs		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
66532516Sgibbs				map->callback, map->callback_arg, /*flags*/0);
66632516Sgibbs		s = splhigh();
66732516Sgibbs	}
66832516Sgibbs	splx(s);
66932516Sgibbs}
670