busdma_machdep.c revision 67551
132516Sgibbs/*
240029Sgibbs * Copyright (c) 1997, 1998 Justin T. Gibbs.
332516Sgibbs * All rights reserved.
432516Sgibbs *
532516Sgibbs * Redistribution and use in source and binary forms, with or without
632516Sgibbs * modification, are permitted provided that the following conditions
732516Sgibbs * are met:
832516Sgibbs * 1. Redistributions of source code must retain the above copyright
932516Sgibbs *    notice, this list of conditions, and the following disclaimer,
1032516Sgibbs *    without modification, immediately at the beginning of the file.
1132516Sgibbs * 2. The name of the author may not be used to endorse or promote products
1232516Sgibbs *    derived from this software without specific prior written permission.
1332516Sgibbs *
1432516Sgibbs * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
1532516Sgibbs * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1632516Sgibbs * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1732516Sgibbs * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
1832516Sgibbs * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
1932516Sgibbs * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2032516Sgibbs * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2132516Sgibbs * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2232516Sgibbs * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2332516Sgibbs * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2432516Sgibbs * SUCH DAMAGE.
2532516Sgibbs *
2650477Speter * $FreeBSD: head/sys/i386/i386/busdma_machdep.c 67551 2000-10-25 05:19:40Z jhb $
2732516Sgibbs */
2832516Sgibbs
2932516Sgibbs#include <sys/param.h>
3032516Sgibbs#include <sys/systm.h>
3132516Sgibbs#include <sys/malloc.h>
3267551Sjhb#include <sys/bus.h>
3367551Sjhb#include <sys/interrupt.h>
3432516Sgibbs
3532516Sgibbs#include <vm/vm.h>
3632516Sgibbs#include <vm/vm_page.h>
3732516Sgibbs
3832516Sgibbs#include <machine/bus.h>
3932516Sgibbs#include <machine/md_var.h>
4032516Sgibbs
/*
 * Classic min/max macros; both arguments may be evaluated more than
 * once, so avoid side effects in the arguments.
 */
#define MAX(a,b) (((a) > (b)) ? (a) : (b))
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
/* Upper bound on the number of bounce pages in the global pool. */
#define MAX_BPAGES 128
4432516Sgibbs
/*
 * A DMA tag describes the restrictions a device places on DMA
 * transfers.  Tags inherit restrictions from their parent tag.
 */
struct bus_dma_tag {
	bus_dma_tag_t	  parent;	/* tag we inherit restrictions from */
	bus_size_t	  alignment;	/* alignment for allocated memory */
	bus_size_t	  boundary;	/* boundary segments must not cross */
	bus_addr_t	  lowaddr;	/* (lowaddr, highaddr] may be filtered */
	bus_addr_t	  highaddr;	/* upper bound of exclusion window */
	bus_dma_filter_t *filter;	/* optional address-acceptance callback */
	void		 *filterarg;	/* argument passed to filter */
	bus_size_t	  maxsize;	/* maximum size of a mapping */
	u_int		  nsegments;	/* max number of S/G segments */
	bus_size_t	  maxsegsz;	/* max size of a single segment */
	int		  flags;	/* BUS_DMA_* flags */
	int		  ref_count;	/* 1 for ourself + one per child tag */
	int		  map_count;	/* dmamaps created against this tag */
};
6032516Sgibbs
/*
 * One page of bounce memory; linked on the global free list when
 * idle or on the owning map's bpages list while shadowing data.
 */
struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};
6832516Sgibbs
/* Set when maps have been queued for the busdma software interrupt. */
int busdma_swi_pending;

/* Global bounce-page pool; bookkeeping is manipulated at splhigh(). */
static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;		/* pages sitting on bounce_page_list */
static int reserved_bpages;	/* reserved but not yet handed to a map */
static int active_bpages;	/* pages currently shadowing client data */
static int total_bpages;	/* total pages ever added to the pool */
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR; /* pool's lowaddr limit */
7732516Sgibbs
/*
 * Per-mapping state.  The buf/buflen/callback fields are only used
 * while the map sits on the waiting list for bounce pages, so that
 * bus_dmamap_load() can be retried from busdma_swi().
 */
struct bus_dmamap {
	struct bp_list	       bpages;		/* bounce pages in use */
	int		       pagesneeded;	/* pages left to consume */
	int		       pagesreserved;	/* pages reserved for us */
	bus_dma_tag_t	       dmat;
	void		      *buf;		/* unmapped buffer pointer */
	bus_size_t	       buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};
8932516Sgibbs
/* Maps waiting for bounce pages, and maps ready for their callbacks. */
static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
/* Shared dummy map used when a tag never needs to bounce (map == NULL). */
static struct bus_dmamap nobounce_dmamap;

static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map);
static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
10032516Sgibbs
10132516Sgibbsstatic __inline int
10232516Sgibbsrun_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
10332516Sgibbs{
10432516Sgibbs	int retval;
10532516Sgibbs
10632516Sgibbs	retval = 0;
10732516Sgibbs	do {
10832516Sgibbs		if (paddr > dmat->lowaddr
10932516Sgibbs		 && paddr <= dmat->highaddr
11032516Sgibbs		 && (dmat->filter == NULL
11132516Sgibbs		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
11232516Sgibbs			retval = 1;
11332516Sgibbs
11432516Sgibbs		dmat = dmat->parent;
11532516Sgibbs	} while (retval == 0 && dmat != NULL);
11632516Sgibbs	return (retval);
11732516Sgibbs}
11832516Sgibbs
/* Flag recording that a tag has completed its initial bounce allocation. */
#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.  Restrictions are combined with
 * those of the parent tag; on success *dmat holds the new tag, on
 * failure *dmat is NULL and an errno is returned.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	/* Round both address limits up to the last byte of their page. */
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		/*
		 * XXX Not really correct??? Probably need to honor boundary
		 *     all the way up the inheritence chain.
		 */
		newtag->boundary = MAX(parent->boundary, newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL) {
			/* Keep the parent tag alive while referenced. */
			parent->ref_count++;
		}
	}

	if (newtag->lowaddr < ptoa(Maxmem) && (flags & BUS_DMA_ALLOCNOW) != 0) {
		/* Must bounce */

		if (lowaddr > bounce_lowaddr) {
			/*
			 * Go through the pool and kill any pages
			 * that don't reside below lowaddr.
			 */
			panic("bus_dma_tag_create: page reallocation "
			      "not implemented");
		}
		if (ptoa(total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}
20832516Sgibbs
20932516Sgibbsint
21032516Sgibbsbus_dma_tag_destroy(bus_dma_tag_t dmat)
21132516Sgibbs{
21232516Sgibbs	if (dmat != NULL) {
21332516Sgibbs
21432516Sgibbs		if (dmat->map_count != 0)
21532516Sgibbs			return (EBUSY);
21632516Sgibbs
21732516Sgibbs		while (dmat != NULL) {
21832516Sgibbs			bus_dma_tag_t parent;
21932516Sgibbs
22032516Sgibbs			parent = dmat->parent;
22132516Sgibbs			dmat->ref_count--;
22232516Sgibbs			if (dmat->ref_count == 0) {
22332516Sgibbs				free(dmat, M_DEVBUF);
22440029Sgibbs				/*
22540029Sgibbs				 * Last reference count, so
22640029Sgibbs				 * release our reference
22740029Sgibbs				 * count on our parent.
22840029Sgibbs				 */
22940029Sgibbs				dmat = parent;
23040029Sgibbs			} else
23140029Sgibbs				dmat = NULL;
23232516Sgibbs		}
23332516Sgibbs	}
23432516Sgibbs	return (0);
23532516Sgibbs}
23632516Sgibbs
23732516Sgibbs/*
23832516Sgibbs * Allocate a handle for mapping from kva/uva/physical
23932516Sgibbs * address space into bus device space.
24032516Sgibbs */
24132516Sgibbsint
24232516Sgibbsbus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
24332516Sgibbs{
24432516Sgibbs	int error;
24532516Sgibbs
24632516Sgibbs	error = 0;
24732516Sgibbs
24832516Sgibbs	if (dmat->lowaddr < ptoa(Maxmem)) {
24932516Sgibbs		/* Must bounce */
25032516Sgibbs		int maxpages;
25132516Sgibbs
25232516Sgibbs		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
25332516Sgibbs					     M_NOWAIT);
25432516Sgibbs		if (*mapp == NULL) {
25535767Sgibbs			return (ENOMEM);
25632516Sgibbs		} else {
25732516Sgibbs			/* Initialize the new map */
25832516Sgibbs			bzero(*mapp, sizeof(**mapp));
25932516Sgibbs			STAILQ_INIT(&((*mapp)->bpages));
26032516Sgibbs		}
26132516Sgibbs		/*
26232516Sgibbs		 * Attempt to add pages to our pool on a per-instance
26332516Sgibbs		 * basis up to a sane limit.
26432516Sgibbs		 */
26532516Sgibbs		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
26635767Sgibbs		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
26735767Sgibbs		 || (dmat->map_count > 0
26835767Sgibbs		  && total_bpages < maxpages)) {
26932516Sgibbs			int pages;
27032516Sgibbs
27135767Sgibbs			if (dmat->lowaddr > bounce_lowaddr) {
27235767Sgibbs				/*
27335767Sgibbs				 * Go through the pool and kill any pages
27435767Sgibbs				 * that don't reside below lowaddr.
27535767Sgibbs				 */
27635767Sgibbs				panic("bus_dmamap_create: page reallocation "
27735767Sgibbs				      "not implemented");
27835767Sgibbs			}
27932516Sgibbs			pages = atop(dmat->maxsize);
28032516Sgibbs			pages = MIN(maxpages - total_bpages, pages);
28135767Sgibbs			error = alloc_bounce_pages(dmat, pages);
28235767Sgibbs
28335767Sgibbs			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
28435767Sgibbs				if (error == 0)
28535767Sgibbs					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
28635767Sgibbs			} else {
28735767Sgibbs				error = 0;
28835767Sgibbs			}
28932516Sgibbs		}
29032516Sgibbs	} else {
29140029Sgibbs		*mapp = NULL;
29232516Sgibbs	}
29332516Sgibbs	if (error == 0)
29432516Sgibbs		dmat->map_count++;
29532516Sgibbs	return (error);
29632516Sgibbs}
29732516Sgibbs
29832516Sgibbs/*
29932516Sgibbs * Destroy a handle for mapping from kva/uva/physical
30032516Sgibbs * address space into bus device space.
30132516Sgibbs */
30232516Sgibbsint
30332516Sgibbsbus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
30432516Sgibbs{
30532516Sgibbs	if (map != NULL) {
30632516Sgibbs		if (STAILQ_FIRST(&map->bpages) != NULL)
30732516Sgibbs			return (EBUSY);
30832516Sgibbs		free(map, M_DEVBUF);
30932516Sgibbs	}
31032516Sgibbs	dmat->map_count--;
31132516Sgibbs	return (0);
31232516Sgibbs}
31332516Sgibbs
31435767Sgibbs
/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem)) {
		/*
		 * Small request with no address restriction inside
		 * physical memory: plain malloc suffices.
		 */
		*vaddr = malloc(dmat->maxsize, M_DEVBUF,
				(flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF,
		    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK,
		    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}
34535767Sgibbs
/*
 * Free a piece of memory and its associated dmamap, allocated
 * via bus_dmamem_alloc.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	/* XXX There is no "contigfree" and "free" doesn't work */
	if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem))
		free(vaddr, M_DEVBUF);
	/*
	 * NOTE(review): memory that came from the contigmalloc() path
	 * in bus_dmamem_alloc() is leaked here (see XXX above).
	 */
}
36335767Sgibbs
/* Worst-case segment count when the compiler can't size the array. */
#define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
 * The callback is invoked with the resulting S/G list; if bounce
 * pages are needed but unavailable, EINPROGRESS is returned and the
 * load is retried (and the callback run) later from busdma_swi().
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	vm_offset_t		vaddr;
	vm_offset_t		paddr;
#ifdef __GNUC__
	bus_dma_segment_t	dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t	dm_segments[BUS_DMAMAP_NSEGS];
#endif
	bus_dma_segment_t      *sg;
	int			seg;
	int			error;
	vm_offset_t		nextpaddr;

	/* Tags that never bounce hand out NULL maps; use the shared one. */
	if (map == NULL)
		map = &nobounce_dmamap;

	error = 0;
	/*
	 * If we are being called during a callback, pagesneeded will
	 * be non-zero, so we can avoid doing the work twice.
	 */
	if (dmat->lowaddr < ptoa(Maxmem) && map->pagesneeded == 0) {
		vm_offset_t	vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0) {

				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		int s;

		s = splhigh();
	 	if (reserve_bounce_pages(dmat, map) != 0) {

			/* Queue us for resources */
			map->dmat = dmat;
			map->buf = buf;
			map->buflen = buflen;
			map->callback = callback;
			map->callback_arg = callback_arg;

			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
			splx(s);

			/* busdma_swi() will retry once pages are freed. */
			return (EINPROGRESS);
		}
		splx(s);
	}

	vaddr = (vm_offset_t)buf;
	sg = &dm_segments[0];
	seg = 1;
	sg->ds_len = 0;

	nextpaddr = 0;
	do {
		bus_size_t	size;

		/* Walk the buffer at most one physical page at a time. */
		paddr = pmap_kextract(vaddr);
		size = PAGE_SIZE - (paddr & PAGE_MASK);
		if (size > buflen)
			size = buflen;

		if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
			/* Redirect this chunk through a bounce page. */
			paddr = add_bounce_page(dmat, map, vaddr, size);
		}

		if (sg->ds_len == 0) {
			/* Very first chunk: start the first segment. */
			sg->ds_addr = paddr;
			sg->ds_len = size;
		} else if (paddr == nextpaddr) {
			/* Physically contiguous: extend current segment. */
			sg->ds_len += size;
		} else {
			/* Go to the next segment */
			sg++;
			seg++;
			if (seg > dmat->nsegments)
				break;
			sg->ds_addr = paddr;
			sg->ds_len = size;
		}
		vaddr += size;
		nextpaddr = paddr + size;
		buflen -= size;

	} while (buflen > 0);

	/* A non-zero remainder means we ran out of segments above. */
	if (buflen != 0) {
		printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
		       (u_long)buflen);
		error = EFBIG;
	}

	(*callback)(callback_arg, dm_segments, seg, error);

	return (0);
}
48432516Sgibbs
48532516Sgibbs/*
48632516Sgibbs * Release the mapping held by map.
48732516Sgibbs */
48832516Sgibbsvoid
48932516Sgibbs_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
49032516Sgibbs{
49132516Sgibbs	struct bounce_page *bpage;
49232516Sgibbs
49332516Sgibbs	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
49432516Sgibbs		STAILQ_REMOVE_HEAD(&map->bpages, links);
49532516Sgibbs		free_bounce_page(dmat, bpage);
49632516Sgibbs	}
49732516Sgibbs}
49832516Sgibbs
/*
 * Synchronize bounce buffers with the client's data before (write)
 * or after (read) a DMA operation.  Maps with no bounce pages need
 * no work at all.
 */
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {

		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		switch (op) {
		case BUS_DMASYNC_PREWRITE:
			/* Copy client data into the bounce pages. */
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			break;

		case BUS_DMASYNC_POSTREAD:
			/* Copy bounced data back to the client buffer. */
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			break;
		case BUS_DMASYNC_PREREAD:
		case BUS_DMASYNC_POSTWRITE:
			/* No-ops */
			break;
		}
	}
}
53632516Sgibbs
53732516Sgibbsstatic int
53832516Sgibbsalloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
53932516Sgibbs{
54032516Sgibbs	int count;
54132516Sgibbs
54232516Sgibbs	count = 0;
54332516Sgibbs	if (total_bpages == 0) {
54432516Sgibbs		STAILQ_INIT(&bounce_page_list);
54532516Sgibbs		STAILQ_INIT(&bounce_map_waitinglist);
54632516Sgibbs		STAILQ_INIT(&bounce_map_callbacklist);
54732516Sgibbs	}
54832516Sgibbs
54932516Sgibbs	while (numpages > 0) {
55032516Sgibbs		struct bounce_page *bpage;
55132516Sgibbs		int s;
55232516Sgibbs
55332516Sgibbs		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
55432516Sgibbs						     M_NOWAIT);
55532516Sgibbs
55632516Sgibbs		if (bpage == NULL)
55732516Sgibbs			break;
55832516Sgibbs		bzero(bpage, sizeof(*bpage));
55932516Sgibbs		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
56032516Sgibbs							 M_NOWAIT, 0ul,
56132516Sgibbs							 dmat->lowaddr,
56235767Sgibbs							 PAGE_SIZE,
56335767Sgibbs							 0);
56432516Sgibbs		if (bpage->vaddr == NULL) {
56532516Sgibbs			free(bpage, M_DEVBUF);
56632516Sgibbs			break;
56732516Sgibbs		}
56832516Sgibbs		bpage->busaddr = pmap_kextract(bpage->vaddr);
56932516Sgibbs		s = splhigh();
57032516Sgibbs		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
57132516Sgibbs		total_bpages++;
57232516Sgibbs		free_bpages++;
57332516Sgibbs		splx(s);
57432516Sgibbs		count++;
57532516Sgibbs		numpages--;
57632516Sgibbs	}
57732516Sgibbs	return (count);
57832516Sgibbs}
57932516Sgibbs
58032516Sgibbsstatic int
58132516Sgibbsreserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
58232516Sgibbs{
58332516Sgibbs	int pages;
58432516Sgibbs
58532516Sgibbs	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
58632516Sgibbs	free_bpages -= pages;
58732516Sgibbs	reserved_bpages += pages;
58832516Sgibbs	map->pagesreserved += pages;
58932516Sgibbs	pages = map->pagesneeded - map->pagesreserved;
59032516Sgibbs
59132516Sgibbs	return (pages);
59232516Sgibbs}
59332516Sgibbs
59432516Sgibbsstatic vm_offset_t
59532516Sgibbsadd_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
59632516Sgibbs		bus_size_t size)
59732516Sgibbs{
59832516Sgibbs	int s;
59932516Sgibbs	struct bounce_page *bpage;
60032516Sgibbs
60132516Sgibbs	if (map->pagesneeded == 0)
60232516Sgibbs		panic("add_bounce_page: map doesn't need any pages");
60332516Sgibbs	map->pagesneeded--;
60432516Sgibbs
60532516Sgibbs	if (map->pagesreserved == 0)
60632516Sgibbs		panic("add_bounce_page: map doesn't need any pages");
60732516Sgibbs	map->pagesreserved--;
60832516Sgibbs
60932516Sgibbs	s = splhigh();
61032516Sgibbs	bpage = STAILQ_FIRST(&bounce_page_list);
61132516Sgibbs	if (bpage == NULL)
61232516Sgibbs		panic("add_bounce_page: free page list is empty");
61332516Sgibbs
61432516Sgibbs	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
61532516Sgibbs	reserved_bpages--;
61632516Sgibbs	active_bpages++;
61732516Sgibbs	splx(s);
61832516Sgibbs
61932516Sgibbs	bpage->datavaddr = vaddr;
62032516Sgibbs	bpage->datacount = size;
62132516Sgibbs	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
62232516Sgibbs	return (bpage->busaddr);
62332516Sgibbs}
62432516Sgibbs
/*
 * Return a bounce page to the free pool.  If this satisfies the
 * reservation of the first waiting map, move that map to the
 * callback list and schedule the busdma software interrupt so its
 * deferred bus_dmamap_load() can be retried.
 */
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	int s;
	struct bus_dmamap *map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	s = splhigh();
	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map) == 0) {
			/* Fully reserved; hand off to busdma_swi(). */
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			sched_swi(vm_ih, SWI_NOSWITCH);
		}
	}
	splx(s);
}
64932516Sgibbs
65032516Sgibbsvoid
65132516Sgibbsbusdma_swi()
65232516Sgibbs{
65332516Sgibbs	int s;
65432516Sgibbs	struct bus_dmamap *map;
65532516Sgibbs
65632516Sgibbs	s = splhigh();
65732516Sgibbs	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
65832516Sgibbs		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
65932516Sgibbs		splx(s);
66032516Sgibbs		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
66132516Sgibbs				map->callback, map->callback_arg, /*flags*/0);
66232516Sgibbs		s = splhigh();
66332516Sgibbs	}
66432516Sgibbs	splx(s);
66532516Sgibbs}
666