busdma_machdep.c revision 37555
132516Sgibbs/*
232516Sgibbs * Copyright (c) 1997 Justin T. Gibbs.
332516Sgibbs * All rights reserved.
432516Sgibbs *
532516Sgibbs * Redistribution and use in source and binary forms, with or without
632516Sgibbs * modification, are permitted provided that the following conditions
732516Sgibbs * are met:
832516Sgibbs * 1. Redistributions of source code must retain the above copyright
932516Sgibbs *    notice, this list of conditions, and the following disclaimer,
1032516Sgibbs *    without modification, immediately at the beginning of the file.
1132516Sgibbs * 2. The name of the author may not be used to endorse or promote products
1232516Sgibbs *    derived from this software without specific prior written permission.
1332516Sgibbs *
1432516Sgibbs * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
1532516Sgibbs * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1632516Sgibbs * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1732516Sgibbs * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
1832516Sgibbs * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
1932516Sgibbs * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2032516Sgibbs * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2132516Sgibbs * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2232516Sgibbs * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2332516Sgibbs * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2432516Sgibbs * SUCH DAMAGE.
2532516Sgibbs *
2637555Sbde *      $Id: busdma_machdep.c,v 1.6 1998/05/06 01:45:55 gibbs Exp $
2732516Sgibbs */
2832516Sgibbs
2932516Sgibbs#include <sys/param.h>
3032516Sgibbs#include <sys/systm.h>
3132516Sgibbs#include <sys/malloc.h>
3232516Sgibbs
3332516Sgibbs#include <vm/vm.h>
3432516Sgibbs#include <vm/vm_prot.h>
3532516Sgibbs#include <vm/vm_page.h>
3632516Sgibbs
3732516Sgibbs#include <machine/bus.h>
3832516Sgibbs#include <machine/md_var.h>
3932516Sgibbs
/*
 * Local min/max helpers.  Both macros evaluate their arguments more
 * than once, so operands must be side-effect free.
 */
#define MAX(a,b) (((a) > (b)) ? (a) : (b))
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
/* Hard cap on bounce pages added per map instance (see bus_dmamap_create). */
#define MAX_BPAGES 128
4332516Sgibbs
/*
 * A dma tag records the DMA constraints a device imposes on mappings
 * created from it.  Tags inherit (and may tighten) the restrictions
 * of their parent tag; see bus_dma_tag_create().
 */
struct bus_dma_tag {
	bus_dma_tag_t	  parent;	/* tag we inherit restrictions from */
	bus_size_t	  alignment;	/* segment alignment constraint */
	bus_size_t	  boundary;	/* boundary segments may not cross */
	bus_addr_t	  lowaddr;	/* start of exclusion window; addresses
					 * in (lowaddr, highaddr] may need to
					 * bounce (see run_filter()) */
	bus_addr_t	  highaddr;	/* end of exclusion window */
	bus_dma_filter_t *filter;	/* optional per-address accept filter */
	void		 *filterarg;	/* opaque argument passed to filter */
	bus_size_t	  maxsize;	/* maximum size of one mapping */
	u_int		  nsegments;	/* max scatter/gather segments */
	bus_size_t	  maxsegsz;	/* max size of a single segment */
	int		  flags;	/* BUS_DMA_* flags */
	int		  ref_count;	/* self + tags referencing us as parent */
	int		  map_count;	/* dmamaps created from this tag */
};
5932516Sgibbs
/*
 * One page of bounce buffer, plus (while active) the client data
 * region it stands in for during a bounced transfer.
 */
struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;	/* free list or per-map list */
};
6732516Sgibbs
/* Set when maps await a deferred callback; serviced by busdma_swi(). */
int busdma_swi_pending;

/* Global bounce-page pool; list and counters are guarded by splhigh. */
static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;		/* pages in pool, not yet reserved */
static int reserved_bpages;	/* reserved for a map, not yet handed out */
static int active_bpages;	/* handed out to in-flight mappings */
static int total_bpages;	/* total pages ever added to the pool */
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR; /* strictest lowaddr
						       * the pool satisfies */
7632516Sgibbs
/*
 * A dmamap tracks the bounce pages backing one mapping and, when a
 * load is deferred for lack of pages, the saved request parameters
 * needed to replay it from busdma_swi().
 */
struct bus_dmamap {
	struct bp_list	       bpages;		/* bounce pages in use */
	int		       pagesneeded;	/* pages left to bounce */
	int		       pagesreserved;	/* pages reserved, not used */
	bus_dma_tag_t	       dmat;		/* tag of deferred request */
	void		      *buf;		/* unmapped buffer pointer */
	bus_size_t	       buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;	/* deferred completion hook */
	void		      *callback_arg;	/* argument for callback */
	STAILQ_ENTRY(bus_dmamap) links;		/* waiting/callback list link */
};
8832516Sgibbs
/* Maps waiting for bounce pages, and maps whose callback is pending. */
static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
/* Shared static map handed out for tags that can never need bouncing. */
static struct bus_dmamap nobounce_dmamap;

static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map);
static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
9832516Sgibbsstatic __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
9932516Sgibbs
10032516Sgibbsstatic __inline int
10132516Sgibbsrun_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
10232516Sgibbs{
10332516Sgibbs	int retval;
10432516Sgibbs
10532516Sgibbs	retval = 0;
10632516Sgibbs	do {
10732516Sgibbs		if (paddr > dmat->lowaddr
10832516Sgibbs		 && paddr <= dmat->highaddr
10932516Sgibbs		 && (dmat->filter == NULL
11032516Sgibbs		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
11132516Sgibbs			retval = 1;
11232516Sgibbs
11332516Sgibbs		dmat = dmat->parent;
11432516Sgibbs	} while (retval == 0 && dmat != NULL);
11532516Sgibbs	return (retval);
11632516Sgibbs}
11732516Sgibbs
11835767Sgibbs#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
11932516Sgibbs/*
12032516Sgibbs * Allocate a device specific dma_tag.
12132516Sgibbs */
12232516Sgibbsint
12335767Sgibbsbus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
12435767Sgibbs		   bus_size_t boundary, bus_addr_t lowaddr,
12535767Sgibbs		   bus_addr_t highaddr, bus_dma_filter_t *filter,
12635767Sgibbs		   void *filterarg, bus_size_t maxsize, int nsegments,
12735767Sgibbs		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
12832516Sgibbs{
12932516Sgibbs	bus_dma_tag_t newtag;
13032516Sgibbs	int error = 0;
13132516Sgibbs
13232516Sgibbs	/* Return a NULL tag on failure */
13332516Sgibbs	*dmat = NULL;
13432516Sgibbs
13532516Sgibbs	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
13632516Sgibbs	if (newtag == NULL)
13732516Sgibbs		return (ENOMEM);
13832516Sgibbs
13932516Sgibbs	newtag->parent = parent;
14032516Sgibbs	newtag->boundary = boundary;
14132516Sgibbs	newtag->lowaddr = trunc_page(lowaddr) + (PAGE_SIZE - 1);
14232516Sgibbs	newtag->highaddr = trunc_page(highaddr) + (PAGE_SIZE - 1);
14332516Sgibbs	newtag->filter = filter;
14432516Sgibbs	newtag->filterarg = filterarg;
14532516Sgibbs	newtag->maxsize = maxsize;
14632516Sgibbs	newtag->nsegments = nsegments;
14732516Sgibbs	newtag->maxsegsz = maxsegsz;
14832516Sgibbs	newtag->flags = flags;
14932516Sgibbs	newtag->ref_count = 1; /* Count ourself */
15032516Sgibbs	newtag->map_count = 0;
15132516Sgibbs
15232516Sgibbs	/* Take into account any restrictions imposed by our parent tag */
15332516Sgibbs	if (parent != NULL) {
15432516Sgibbs		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
15532516Sgibbs		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
15632516Sgibbs		/*
15732516Sgibbs		 * XXX Not really correct??? Probably need to honor boundary
15832516Sgibbs		 *     all the way up the inheritence chain.
15932516Sgibbs		 */
16035767Sgibbs		newtag->boundary = MAX(parent->boundary, newtag->boundary);
16132516Sgibbs		if (newtag->filter == NULL) {
16232516Sgibbs			/*
16332516Sgibbs			 * Short circuit looking at our parent directly
16435256Sdes			 * since we have encapsulated all of its information
16532516Sgibbs			 */
16632516Sgibbs			newtag->filter = parent->filter;
16732516Sgibbs			newtag->filterarg = parent->filterarg;
16832516Sgibbs			newtag->parent = parent->parent;
16932516Sgibbs		}
17032516Sgibbs		if (newtag->parent != NULL) {
17132516Sgibbs			parent->ref_count++;
17232516Sgibbs		}
17332516Sgibbs	}
17432516Sgibbs
17535767Sgibbs	if (newtag->lowaddr < ptoa(Maxmem) && (flags & BUS_DMA_ALLOCNOW) != 0) {
17632516Sgibbs		/* Must bounce */
17732516Sgibbs
17832516Sgibbs		if (lowaddr > bounce_lowaddr) {
17932516Sgibbs			/*
18032516Sgibbs			 * Go through the pool and kill any pages
18132516Sgibbs			 * that don't reside below lowaddr.
18232516Sgibbs			 */
18335767Sgibbs			panic("bus_dma_tag_create: page reallocation "
18432516Sgibbs			      "not implemented");
18532516Sgibbs		}
18632516Sgibbs		if (ptoa(total_bpages) < maxsize) {
18732516Sgibbs			int pages;
18832516Sgibbs
18932516Sgibbs			pages = atop(maxsize) - total_bpages;
19032516Sgibbs
19132516Sgibbs			/* Add pages to our bounce pool */
19232516Sgibbs			if (alloc_bounce_pages(newtag, pages) < pages)
19332516Sgibbs				error = ENOMEM;
19432516Sgibbs		}
19535767Sgibbs		/* Performed initial allocation */
19635767Sgibbs		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
19732516Sgibbs	}
19832516Sgibbs
19932516Sgibbs	if (error != 0) {
20032516Sgibbs		free(newtag, M_DEVBUF);
20132516Sgibbs	} else {
20232516Sgibbs		*dmat = newtag;
20332516Sgibbs	}
20432516Sgibbs	return (error);
20532516Sgibbs}
20632516Sgibbs
20732516Sgibbsint
20832516Sgibbsbus_dma_tag_destroy(bus_dma_tag_t dmat)
20932516Sgibbs{
21032516Sgibbs	if (dmat != NULL) {
21132516Sgibbs
21232516Sgibbs		if (dmat->map_count != 0)
21332516Sgibbs			return (EBUSY);
21432516Sgibbs
21532516Sgibbs		while (dmat != NULL) {
21632516Sgibbs			bus_dma_tag_t parent;
21732516Sgibbs
21832516Sgibbs			parent = dmat->parent;
21932516Sgibbs			dmat->ref_count--;
22032516Sgibbs			if (dmat->ref_count == 0) {
22132516Sgibbs				free(dmat, M_DEVBUF);
22232516Sgibbs			}
22332516Sgibbs			dmat = parent;
22432516Sgibbs		}
22532516Sgibbs	}
22632516Sgibbs	return (0);
22732516Sgibbs}
22832516Sgibbs
22932516Sgibbs/*
23032516Sgibbs * Allocate a handle for mapping from kva/uva/physical
23132516Sgibbs * address space into bus device space.
23232516Sgibbs */
23332516Sgibbsint
23432516Sgibbsbus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
23532516Sgibbs{
23632516Sgibbs	int error;
23732516Sgibbs
23832516Sgibbs	error = 0;
23932516Sgibbs
24032516Sgibbs	if (dmat->lowaddr < ptoa(Maxmem)) {
24132516Sgibbs		/* Must bounce */
24232516Sgibbs		int maxpages;
24332516Sgibbs
24432516Sgibbs		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
24532516Sgibbs					     M_NOWAIT);
24632516Sgibbs		if (*mapp == NULL) {
24735767Sgibbs			return (ENOMEM);
24832516Sgibbs		} else {
24932516Sgibbs			/* Initialize the new map */
25032516Sgibbs			bzero(*mapp, sizeof(**mapp));
25132516Sgibbs			STAILQ_INIT(&((*mapp)->bpages));
25232516Sgibbs		}
25332516Sgibbs		/*
25432516Sgibbs		 * Attempt to add pages to our pool on a per-instance
25532516Sgibbs		 * basis up to a sane limit.
25632516Sgibbs		 */
25732516Sgibbs		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
25835767Sgibbs		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
25935767Sgibbs		 || (dmat->map_count > 0
26035767Sgibbs		  && total_bpages < maxpages)) {
26132516Sgibbs			int pages;
26232516Sgibbs
26335767Sgibbs			if (dmat->lowaddr > bounce_lowaddr) {
26435767Sgibbs				/*
26535767Sgibbs				 * Go through the pool and kill any pages
26635767Sgibbs				 * that don't reside below lowaddr.
26735767Sgibbs				 */
26835767Sgibbs				panic("bus_dmamap_create: page reallocation "
26935767Sgibbs				      "not implemented");
27035767Sgibbs			}
27132516Sgibbs			pages = atop(dmat->maxsize);
27232516Sgibbs			pages = MIN(maxpages - total_bpages, pages);
27335767Sgibbs			error = alloc_bounce_pages(dmat, pages);
27435767Sgibbs
27535767Sgibbs			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
27635767Sgibbs				if (error == 0)
27735767Sgibbs					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
27835767Sgibbs			} else {
27935767Sgibbs				error = 0;
28035767Sgibbs			}
28132516Sgibbs		}
28232516Sgibbs	} else {
28335767Sgibbs		*mapp = &nobounce_dmamap;
28432516Sgibbs	}
28532516Sgibbs	if (error == 0)
28632516Sgibbs		dmat->map_count++;
28732516Sgibbs	return (error);
28832516Sgibbs}
28932516Sgibbs
29032516Sgibbs/*
29132516Sgibbs * Destroy a handle for mapping from kva/uva/physical
29232516Sgibbs * address space into bus device space.
29332516Sgibbs */
29432516Sgibbsint
29532516Sgibbsbus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
29632516Sgibbs{
29732516Sgibbs	if (map != NULL) {
29832516Sgibbs		if (STAILQ_FIRST(&map->bpages) != NULL)
29932516Sgibbs			return (EBUSY);
30032516Sgibbs		free(map, M_DEVBUF);
30132516Sgibbs	}
30232516Sgibbs	dmat->map_count--;
30332516Sgibbs	return (0);
30432516Sgibbs}
30532516Sgibbs
30635767Sgibbs
/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	/* If we succeed, no mapping/bouncing will be required */
	*mapp = &nobounce_dmamap;

	if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem)) {
		/* Unconstrained single-page-or-less: plain malloc suffices. */
		*vaddr = malloc(dmat->maxsize, M_DEVBUF,
				(flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		/*
		 * NOTE(review): alignment is passed as 1ul, so
		 * dmat->alignment is not honored here -- confirm whether
		 * any caller relies on stricter alignment.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF,
				      (flags & BUS_DMA_NOWAIT)
				      ? M_NOWAIT : M_WAITOK,
				      0ul, dmat->lowaddr, 1ul, dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}
33735767Sgibbs
33835767Sgibbs/*
33935767Sgibbs * Free a piece of memory and it's allociated dmamap, that was allocated
34035767Sgibbs * via bus_dmamem_alloc.
34135767Sgibbs */
34235767Sgibbsvoid
34335767Sgibbsbus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
34435767Sgibbs{
34535767Sgibbs	/*
34635767Sgibbs	 * dmamem does not need to be bounced, so the map should be
34735767Sgibbs	 * NULL
34835767Sgibbs	 */
34935767Sgibbs	if (map != NULL)
35035767Sgibbs		panic("bus_dmamem_free: Invalid map freed\n");
35135767Sgibbs	free(vaddr, M_DEVBUF);
35235767Sgibbs}
35335767Sgibbs
/* Worst-case segment count when the compiler lacks VLA support. */
#define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
 *
 * Builds a physical scatter/gather list for [buf, buf + buflen) and
 * hands it to 'callback'.  If bounce pages are required but cannot be
 * reserved immediately, the request is queued and EINPROGRESS is
 * returned; the callback then runs later from busdma_swi().
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	vm_offset_t		vaddr;
	vm_offset_t		paddr;
#ifdef __GNUC__
	bus_dma_segment_t	dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t	dm_segments[BUS_DMAMAP_NSEGS];
#endif
	bus_dma_segment_t      *sg;
	int			seg;
	int			error;

	error = 0;
	/*
	 * If we are being called during a callback, pagesneeded will
	 * be non-zero, so we can avoid doing the work twice.
	 */
	if (dmat->lowaddr < ptoa(Maxmem) && map->pagesneeded == 0) {
		vm_offset_t	vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page(buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0) {

				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		int s;

		s = splhigh();
	 	if (reserve_bounce_pages(dmat, map) != 0) {

			/* Queue us for resources */
			map->dmat = dmat;
			map->buf = buf;
			map->buflen = buflen;
			map->callback = callback;
			map->callback_arg = callback_arg;

			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
			splx(s);

			return (EINPROGRESS);
		}
		splx(s);
	}

	/* Walk the buffer page by page, coalescing contiguous pages. */
	vaddr = (vm_offset_t)buf;
	sg = &dm_segments[0];
	seg = 1;
	sg->ds_len = 0;

	do {
		bus_size_t	size;
		vm_offset_t	nextpaddr;	/* GCC warning expected */

		paddr = pmap_kextract(vaddr);
		size = PAGE_SIZE - (paddr & PAGE_MASK);
		if (size > buflen)
			size = buflen;

		/* Substitute a bounce page where the filter demands it. */
		if (map->pagesneeded != 0
		 && run_filter(dmat, paddr)) {
			paddr = add_bounce_page(dmat, map, vaddr, size);
		}

		if (sg->ds_len == 0) {
			/* First chunk of the first segment. */
			sg->ds_addr = paddr;
			sg->ds_len = size;
		} else if (paddr == nextpaddr) {
			/*
			 * Physically contiguous: extend the segment.
			 * nextpaddr is never read before being set below,
			 * since ds_len == 0 on the first iteration.
			 */
			sg->ds_len += size;
		} else {
			/* Go to the next segment */
			sg++;
			seg++;
			if (seg > dmat->nsegments)
				break;
			sg->ds_addr = paddr;
			sg->ds_len = size;
		}
		vaddr += size;
		nextpaddr = paddr + size;
		buflen -= size;
	} while (buflen > 0);
	/*
	 * NOTE(review): dmat->maxsegsz is not enforced while coalescing
	 * above -- confirm no current consumer needs it honored here.
	 */

	if (buflen != 0) {
		/* Ran out of segments before consuming the buffer. */
		printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
		       buflen);
		error = EFBIG;
	}

	/* The callback always runs; 'error' carries the EFBIG overflow. */
	(*callback)(callback_arg, dm_segments, seg, error);

	return (0);
}
47032516Sgibbs
47132516Sgibbs/*
47232516Sgibbs * Release the mapping held by map.
47332516Sgibbs */
47432516Sgibbsvoid
47532516Sgibbs_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
47632516Sgibbs{
47732516Sgibbs	struct bounce_page *bpage;
47832516Sgibbs
47932516Sgibbs	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
48032516Sgibbs		STAILQ_REMOVE_HEAD(&map->bpages, links);
48132516Sgibbs		free_bounce_page(dmat, bpage);
48232516Sgibbs	}
48332516Sgibbs}
48432516Sgibbs
48532516Sgibbsvoid
48632516Sgibbs_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
48732516Sgibbs{
48832516Sgibbs	struct bounce_page *bpage;
48932516Sgibbs
49032516Sgibbs	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
49132516Sgibbs
49232516Sgibbs		/*
49332516Sgibbs		 * Handle data bouncing.  We might also
49432516Sgibbs		 * want to add support for invalidating
49532516Sgibbs		 * the caches on broken hardware
49632516Sgibbs		 */
49732516Sgibbs		switch (op) {
49832516Sgibbs		case BUS_DMASYNC_PREWRITE:
49932516Sgibbs			while (bpage != NULL) {
50032516Sgibbs				bcopy((void *)bpage->datavaddr,
50132516Sgibbs				      (void *)bpage->vaddr,
50232516Sgibbs				      bpage->datacount);
50332516Sgibbs				bpage = STAILQ_NEXT(bpage, links);
50432516Sgibbs			}
50532516Sgibbs			break;
50632516Sgibbs
50732516Sgibbs		case BUS_DMASYNC_POSTREAD:
50832516Sgibbs			while (bpage != NULL) {
50932516Sgibbs				bcopy((void *)bpage->vaddr,
51032516Sgibbs				      (void *)bpage->datavaddr,
51132516Sgibbs				      bpage->datacount);
51232516Sgibbs				bpage = STAILQ_NEXT(bpage, links);
51332516Sgibbs			}
51432516Sgibbs			break;
51532516Sgibbs		case BUS_DMASYNC_PREREAD:
51632516Sgibbs		case BUS_DMASYNC_POSTWRITE:
51732516Sgibbs			/* No-ops */
51832516Sgibbs			break;
51932516Sgibbs		}
52032516Sgibbs	}
52132516Sgibbs}
52232516Sgibbs
52332516Sgibbsstatic int
52432516Sgibbsalloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
52532516Sgibbs{
52632516Sgibbs	int count;
52732516Sgibbs
52832516Sgibbs	count = 0;
52932516Sgibbs	if (total_bpages == 0) {
53032516Sgibbs		STAILQ_INIT(&bounce_page_list);
53132516Sgibbs		STAILQ_INIT(&bounce_map_waitinglist);
53232516Sgibbs		STAILQ_INIT(&bounce_map_callbacklist);
53332516Sgibbs	}
53432516Sgibbs
53532516Sgibbs	while (numpages > 0) {
53632516Sgibbs		struct bounce_page *bpage;
53732516Sgibbs		int s;
53832516Sgibbs
53932516Sgibbs		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
54032516Sgibbs						     M_NOWAIT);
54132516Sgibbs
54232516Sgibbs		if (bpage == NULL)
54332516Sgibbs			break;
54432516Sgibbs		bzero(bpage, sizeof(*bpage));
54532516Sgibbs		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
54632516Sgibbs							 M_NOWAIT, 0ul,
54732516Sgibbs							 dmat->lowaddr,
54835767Sgibbs							 PAGE_SIZE,
54935767Sgibbs							 0);
55032516Sgibbs		if (bpage->vaddr == NULL) {
55132516Sgibbs			free(bpage, M_DEVBUF);
55232516Sgibbs			break;
55332516Sgibbs		}
55432516Sgibbs		bpage->busaddr = pmap_kextract(bpage->vaddr);
55532516Sgibbs		s = splhigh();
55632516Sgibbs		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
55732516Sgibbs		total_bpages++;
55832516Sgibbs		free_bpages++;
55932516Sgibbs		splx(s);
56032516Sgibbs		count++;
56132516Sgibbs		numpages--;
56232516Sgibbs	}
56332516Sgibbs	return (count);
56432516Sgibbs}
56532516Sgibbs
56632516Sgibbsstatic int
56732516Sgibbsreserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
56832516Sgibbs{
56932516Sgibbs	int pages;
57032516Sgibbs
57132516Sgibbs	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
57232516Sgibbs	free_bpages -= pages;
57332516Sgibbs	reserved_bpages += pages;
57432516Sgibbs	map->pagesreserved += pages;
57532516Sgibbs	pages = map->pagesneeded - map->pagesreserved;
57632516Sgibbs
57732516Sgibbs	return (pages);
57832516Sgibbs}
57932516Sgibbs
58032516Sgibbsstatic vm_offset_t
58132516Sgibbsadd_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
58232516Sgibbs		bus_size_t size)
58332516Sgibbs{
58432516Sgibbs	int s;
58532516Sgibbs	struct bounce_page *bpage;
58632516Sgibbs
58732516Sgibbs	if (map->pagesneeded == 0)
58832516Sgibbs		panic("add_bounce_page: map doesn't need any pages");
58932516Sgibbs	map->pagesneeded--;
59032516Sgibbs
59132516Sgibbs	if (map->pagesreserved == 0)
59232516Sgibbs		panic("add_bounce_page: map doesn't need any pages");
59332516Sgibbs	map->pagesreserved--;
59432516Sgibbs
59532516Sgibbs	s = splhigh();
59632516Sgibbs	bpage = STAILQ_FIRST(&bounce_page_list);
59732516Sgibbs	if (bpage == NULL)
59832516Sgibbs		panic("add_bounce_page: free page list is empty");
59932516Sgibbs
60032516Sgibbs	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
60132516Sgibbs	reserved_bpages--;
60232516Sgibbs	active_bpages++;
60332516Sgibbs	splx(s);
60432516Sgibbs
60532516Sgibbs	bpage->datavaddr = vaddr;
60632516Sgibbs	bpage->datacount = size;
60732516Sgibbs	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
60832516Sgibbs	return (bpage->busaddr);
60932516Sgibbs}
61032516Sgibbs
/*
 * Return a bounce page to the free pool.  If a map is waiting for
 * pages, try to complete its reservation; once fully reserved it is
 * moved to the callback list and busdma_swi() is scheduled to replay
 * its deferred bus_dmamap_load().
 */
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	int s;
	struct bus_dmamap *map;

	/* Disassociate the client data before recycling the page. */
	bpage->datavaddr = 0;
	bpage->datacount = 0;

	s = splhigh();
	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map) == 0) {
			/* Fully reserved: hand off to the soft interrupt. */
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			setsoftvm();
		}
	}
	splx(s);
}
63532516Sgibbs
63632516Sgibbsvoid
63732516Sgibbsbusdma_swi()
63832516Sgibbs{
63932516Sgibbs	int s;
64032516Sgibbs	struct bus_dmamap *map;
64132516Sgibbs
64232516Sgibbs	s = splhigh();
64332516Sgibbs	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
64432516Sgibbs		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
64532516Sgibbs		splx(s);
64632516Sgibbs		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
64732516Sgibbs				map->callback, map->callback_arg, /*flags*/0);
64832516Sgibbs		s = splhigh();
64932516Sgibbs	}
65032516Sgibbs	splx(s);
65132516Sgibbs}
652