busdma_machdep.c revision 95076
/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/i386/i386/busdma_machdep.c 95076 2002-04-19 22:58:09Z alfred $
 */
2832516Sgibbs
2932516Sgibbs#include <sys/param.h>
3032516Sgibbs#include <sys/systm.h>
3132516Sgibbs#include <sys/malloc.h>
3267551Sjhb#include <sys/bus.h>
3367551Sjhb#include <sys/interrupt.h>
3476827Salfred#include <sys/lock.h>
3579224Sdillon#include <sys/proc.h>
3676827Salfred#include <sys/mutex.h>
3732516Sgibbs
3832516Sgibbs#include <vm/vm.h>
3932516Sgibbs#include <vm/vm_page.h>
4032516Sgibbs
4132516Sgibbs#include <machine/bus.h>
4232516Sgibbs#include <machine/md_var.h>
4332516Sgibbs
/*
 * Local min/max helpers.  Both evaluate their arguments more than
 * once, so never pass expressions with side effects.
 */
#define MAX(a,b) (((a) > (b)) ? (a) : (b))
#define MIN(a,b) (((a) < (b)) ? (a) : (b))

/* Upper bound on the number of bounce pages in the global pool. */
#define MAX_BPAGES 128

4832516Sgibbsstruct bus_dma_tag {
4932516Sgibbs	bus_dma_tag_t	  parent;
5035767Sgibbs	bus_size_t	  alignment;
5132516Sgibbs	bus_size_t	  boundary;
5232516Sgibbs	bus_addr_t	  lowaddr;
5332516Sgibbs	bus_addr_t	  highaddr;
5432516Sgibbs	bus_dma_filter_t *filter;
5532516Sgibbs	void		 *filterarg;
5632516Sgibbs	bus_size_t	  maxsize;
5735767Sgibbs	u_int		  nsegments;
5832516Sgibbs	bus_size_t	  maxsegsz;
5932516Sgibbs	int		  flags;
6032516Sgibbs	int		  ref_count;
6132516Sgibbs	int		  map_count;
6232516Sgibbs};
6332516Sgibbs
6432516Sgibbsstruct bounce_page {
6532516Sgibbs	vm_offset_t	vaddr;		/* kva of bounce buffer */
6632516Sgibbs	bus_addr_t	busaddr;	/* Physical address */
6732516Sgibbs	vm_offset_t	datavaddr;	/* kva of client data */
6832516Sgibbs	bus_size_t	datacount;	/* client data count */
6960938Sjake	STAILQ_ENTRY(bounce_page) links;
7032516Sgibbs};
7132516Sgibbs
7232516Sgibbsint busdma_swi_pending;
7332516Sgibbs
7460938Sjakestatic STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
7532516Sgibbsstatic int free_bpages;
7632516Sgibbsstatic int reserved_bpages;
7732516Sgibbsstatic int active_bpages;
7832516Sgibbsstatic int total_bpages;
7932516Sgibbsstatic bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;
8032516Sgibbs
8132516Sgibbsstruct bus_dmamap {
8232516Sgibbs	struct bp_list	       bpages;
8332516Sgibbs	int		       pagesneeded;
8432516Sgibbs	int		       pagesreserved;
8532516Sgibbs	bus_dma_tag_t	       dmat;
8632516Sgibbs	void		      *buf;		/* unmapped buffer pointer */
8732516Sgibbs	bus_size_t	       buflen;		/* unmapped buffer length */
8832516Sgibbs	bus_dmamap_callback_t *callback;
8932516Sgibbs	void		      *callback_arg;
9060938Sjake	STAILQ_ENTRY(bus_dmamap) links;
9132516Sgibbs};
9232516Sgibbs
9360938Sjakestatic STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
9460938Sjakestatic STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
9532516Sgibbsstatic struct bus_dmamap nobounce_dmamap;
9632516Sgibbs
9732516Sgibbsstatic int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
9832516Sgibbsstatic int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map);
9932516Sgibbsstatic vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
10032516Sgibbs				   vm_offset_t vaddr, bus_size_t size);
10132516Sgibbsstatic void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
10232516Sgibbsstatic __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
10332516Sgibbs
10495076Salfred/*
10595076Salfred * Return true if a match is made.
10695076Salfred *
10795076Salfred * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
10895076Salfred *
10995076Salfred * If paddr is within the bounds of the dma tag then call the filter callback
11095076Salfred * to check for a match, if there is no filter callback then assume a match.
11195076Salfred */
11232516Sgibbsstatic __inline int
11332516Sgibbsrun_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
11432516Sgibbs{
11532516Sgibbs	int retval;
11632516Sgibbs
11732516Sgibbs	retval = 0;
11832516Sgibbs	do {
11932516Sgibbs		if (paddr > dmat->lowaddr
12032516Sgibbs		 && paddr <= dmat->highaddr
12132516Sgibbs		 && (dmat->filter == NULL
12232516Sgibbs		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
12332516Sgibbs			retval = 1;
12432516Sgibbs
12532516Sgibbs		dmat = dmat->parent;
12632516Sgibbs	} while (retval == 0 && dmat != NULL);
12732516Sgibbs	return (retval);
12832516Sgibbs}
12932516Sgibbs
13035767Sgibbs#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
13132516Sgibbs/*
13232516Sgibbs * Allocate a device specific dma_tag.
13332516Sgibbs */
13432516Sgibbsint
13535767Sgibbsbus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
13635767Sgibbs		   bus_size_t boundary, bus_addr_t lowaddr,
13735767Sgibbs		   bus_addr_t highaddr, bus_dma_filter_t *filter,
13835767Sgibbs		   void *filterarg, bus_size_t maxsize, int nsegments,
13935767Sgibbs		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
14032516Sgibbs{
14132516Sgibbs	bus_dma_tag_t newtag;
14232516Sgibbs	int error = 0;
14332516Sgibbs
14432516Sgibbs	/* Return a NULL tag on failure */
14532516Sgibbs	*dmat = NULL;
14632516Sgibbs
14732516Sgibbs	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
14832516Sgibbs	if (newtag == NULL)
14932516Sgibbs		return (ENOMEM);
15032516Sgibbs
15132516Sgibbs	newtag->parent = parent;
15248449Smjacob	newtag->alignment = alignment;
15332516Sgibbs	newtag->boundary = boundary;
15440286Sdg	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
15540286Sdg	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
15632516Sgibbs	newtag->filter = filter;
15732516Sgibbs	newtag->filterarg = filterarg;
15832516Sgibbs	newtag->maxsize = maxsize;
15932516Sgibbs	newtag->nsegments = nsegments;
16032516Sgibbs	newtag->maxsegsz = maxsegsz;
16132516Sgibbs	newtag->flags = flags;
16232516Sgibbs	newtag->ref_count = 1; /* Count ourself */
16332516Sgibbs	newtag->map_count = 0;
16432516Sgibbs
16532516Sgibbs	/* Take into account any restrictions imposed by our parent tag */
16632516Sgibbs	if (parent != NULL) {
16732516Sgibbs		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
16832516Sgibbs		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
16932516Sgibbs		/*
17032516Sgibbs		 * XXX Not really correct??? Probably need to honor boundary
17132516Sgibbs		 *     all the way up the inheritence chain.
17232516Sgibbs		 */
17335767Sgibbs		newtag->boundary = MAX(parent->boundary, newtag->boundary);
17432516Sgibbs		if (newtag->filter == NULL) {
17532516Sgibbs			/*
17632516Sgibbs			 * Short circuit looking at our parent directly
17735256Sdes			 * since we have encapsulated all of its information
17832516Sgibbs			 */
17932516Sgibbs			newtag->filter = parent->filter;
18032516Sgibbs			newtag->filterarg = parent->filterarg;
18132516Sgibbs			newtag->parent = parent->parent;
18232516Sgibbs		}
18332516Sgibbs		if (newtag->parent != NULL) {
18432516Sgibbs			parent->ref_count++;
18532516Sgibbs		}
18632516Sgibbs	}
18732516Sgibbs
18835767Sgibbs	if (newtag->lowaddr < ptoa(Maxmem) && (flags & BUS_DMA_ALLOCNOW) != 0) {
18932516Sgibbs		/* Must bounce */
19032516Sgibbs
19132516Sgibbs		if (lowaddr > bounce_lowaddr) {
19232516Sgibbs			/*
19332516Sgibbs			 * Go through the pool and kill any pages
19432516Sgibbs			 * that don't reside below lowaddr.
19532516Sgibbs			 */
19635767Sgibbs			panic("bus_dma_tag_create: page reallocation "
19732516Sgibbs			      "not implemented");
19832516Sgibbs		}
19932516Sgibbs		if (ptoa(total_bpages) < maxsize) {
20032516Sgibbs			int pages;
20132516Sgibbs
20232516Sgibbs			pages = atop(maxsize) - total_bpages;
20332516Sgibbs
20432516Sgibbs			/* Add pages to our bounce pool */
20532516Sgibbs			if (alloc_bounce_pages(newtag, pages) < pages)
20632516Sgibbs				error = ENOMEM;
20732516Sgibbs		}
20835767Sgibbs		/* Performed initial allocation */
20935767Sgibbs		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
21032516Sgibbs	}
21132516Sgibbs
21232516Sgibbs	if (error != 0) {
21332516Sgibbs		free(newtag, M_DEVBUF);
21432516Sgibbs	} else {
21532516Sgibbs		*dmat = newtag;
21632516Sgibbs	}
21732516Sgibbs	return (error);
21832516Sgibbs}
21932516Sgibbs
22032516Sgibbsint
22132516Sgibbsbus_dma_tag_destroy(bus_dma_tag_t dmat)
22232516Sgibbs{
22332516Sgibbs	if (dmat != NULL) {
22432516Sgibbs
22532516Sgibbs		if (dmat->map_count != 0)
22632516Sgibbs			return (EBUSY);
22732516Sgibbs
22832516Sgibbs		while (dmat != NULL) {
22932516Sgibbs			bus_dma_tag_t parent;
23032516Sgibbs
23132516Sgibbs			parent = dmat->parent;
23232516Sgibbs			dmat->ref_count--;
23332516Sgibbs			if (dmat->ref_count == 0) {
23432516Sgibbs				free(dmat, M_DEVBUF);
23540029Sgibbs				/*
23640029Sgibbs				 * Last reference count, so
23740029Sgibbs				 * release our reference
23840029Sgibbs				 * count on our parent.
23940029Sgibbs				 */
24040029Sgibbs				dmat = parent;
24140029Sgibbs			} else
24240029Sgibbs				dmat = NULL;
24332516Sgibbs		}
24432516Sgibbs	}
24532516Sgibbs	return (0);
24632516Sgibbs}
24732516Sgibbs
24832516Sgibbs/*
24932516Sgibbs * Allocate a handle for mapping from kva/uva/physical
25032516Sgibbs * address space into bus device space.
25132516Sgibbs */
25232516Sgibbsint
25332516Sgibbsbus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
25432516Sgibbs{
25532516Sgibbs	int error;
25632516Sgibbs
25732516Sgibbs	error = 0;
25832516Sgibbs
25932516Sgibbs	if (dmat->lowaddr < ptoa(Maxmem)) {
26032516Sgibbs		/* Must bounce */
26132516Sgibbs		int maxpages;
26232516Sgibbs
26332516Sgibbs		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
26469781Sdwmalone					     M_NOWAIT | M_ZERO);
26569781Sdwmalone		if (*mapp == NULL)
26635767Sgibbs			return (ENOMEM);
26769781Sdwmalone
26869781Sdwmalone		/* Initialize the new map */
26969781Sdwmalone		STAILQ_INIT(&((*mapp)->bpages));
27069781Sdwmalone
27132516Sgibbs		/*
27232516Sgibbs		 * Attempt to add pages to our pool on a per-instance
27332516Sgibbs		 * basis up to a sane limit.
27432516Sgibbs		 */
27532516Sgibbs		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
27635767Sgibbs		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
27735767Sgibbs		 || (dmat->map_count > 0
27835767Sgibbs		  && total_bpages < maxpages)) {
27932516Sgibbs			int pages;
28032516Sgibbs
28135767Sgibbs			if (dmat->lowaddr > bounce_lowaddr) {
28235767Sgibbs				/*
28335767Sgibbs				 * Go through the pool and kill any pages
28435767Sgibbs				 * that don't reside below lowaddr.
28535767Sgibbs				 */
28635767Sgibbs				panic("bus_dmamap_create: page reallocation "
28735767Sgibbs				      "not implemented");
28835767Sgibbs			}
28932516Sgibbs			pages = atop(dmat->maxsize);
29032516Sgibbs			pages = MIN(maxpages - total_bpages, pages);
29135767Sgibbs			error = alloc_bounce_pages(dmat, pages);
29235767Sgibbs
29335767Sgibbs			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
29435767Sgibbs				if (error == 0)
29535767Sgibbs					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
29635767Sgibbs			} else {
29735767Sgibbs				error = 0;
29835767Sgibbs			}
29932516Sgibbs		}
30032516Sgibbs	} else {
30140029Sgibbs		*mapp = NULL;
30232516Sgibbs	}
30332516Sgibbs	if (error == 0)
30432516Sgibbs		dmat->map_count++;
30532516Sgibbs	return (error);
30632516Sgibbs}
30732516Sgibbs
30832516Sgibbs/*
30932516Sgibbs * Destroy a handle for mapping from kva/uva/physical
31032516Sgibbs * address space into bus device space.
31132516Sgibbs */
31232516Sgibbsint
31332516Sgibbsbus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
31432516Sgibbs{
31532516Sgibbs	if (map != NULL) {
31632516Sgibbs		if (STAILQ_FIRST(&map->bpages) != NULL)
31732516Sgibbs			return (EBUSY);
31832516Sgibbs		free(map, M_DEVBUF);
31932516Sgibbs	}
32032516Sgibbs	dmat->map_count--;
32132516Sgibbs	return (0);
32232516Sgibbs}
32332516Sgibbs
32435767Sgibbs
32535767Sgibbs/*
32635767Sgibbs * Allocate a piece of memory that can be efficiently mapped into
32735767Sgibbs * bus device space based on the constraints lited in the dma tag.
32835767Sgibbs * A dmamap to for use with dmamap_load is also allocated.
32935767Sgibbs */
33035767Sgibbsint
33135767Sgibbsbus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
33235767Sgibbs		 bus_dmamap_t *mapp)
33335767Sgibbs{
33435767Sgibbs	/* If we succeed, no mapping/bouncing will be required */
33540029Sgibbs	*mapp = NULL;
33635767Sgibbs
33735767Sgibbs	if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem)) {
33835767Sgibbs		*vaddr = malloc(dmat->maxsize, M_DEVBUF,
33935767Sgibbs				(flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
34035767Sgibbs	} else {
34135767Sgibbs		/*
34235767Sgibbs		 * XXX Use Contigmalloc until it is merged into this facility
34335767Sgibbs		 *     and handles multi-seg allocations.  Nobody is doing
34435767Sgibbs		 *     multi-seg allocations yet though.
34535767Sgibbs		 */
34635767Sgibbs		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF,
34748449Smjacob		    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK,
34848449Smjacob		    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
34948449Smjacob		    dmat->boundary);
35035767Sgibbs	}
35135767Sgibbs	if (*vaddr == NULL)
35235767Sgibbs		return (ENOMEM);
35335767Sgibbs	return (0);
35435767Sgibbs}
35535767Sgibbs
35635767Sgibbs/*
35735767Sgibbs * Free a piece of memory and it's allociated dmamap, that was allocated
35895076Salfred * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
35935767Sgibbs */
36035767Sgibbsvoid
36135767Sgibbsbus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
36235767Sgibbs{
36335767Sgibbs	/*
36435767Sgibbs	 * dmamem does not need to be bounced, so the map should be
36535767Sgibbs	 * NULL
36635767Sgibbs	 */
36749859Sgibbs	if (map != NULL)
36835767Sgibbs		panic("bus_dmamem_free: Invalid map freed\n");
36940029Sgibbs	if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem))
37040029Sgibbs		free(vaddr, M_DEVBUF);
37181711Swpaul	else
37281711Swpaul		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
37335767Sgibbs}
37435767Sgibbs
37532516Sgibbs#define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1)
37632516Sgibbs
37732516Sgibbs/*
37832516Sgibbs * Map the buffer buf into bus space using the dmamap map.
37932516Sgibbs */
38032516Sgibbsint
38132516Sgibbsbus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
38232516Sgibbs		bus_size_t buflen, bus_dmamap_callback_t *callback,
38332516Sgibbs		void *callback_arg, int flags)
38432516Sgibbs{
38532516Sgibbs	vm_offset_t		vaddr;
38632516Sgibbs	vm_offset_t		paddr;
38732516Sgibbs#ifdef __GNUC__
38832516Sgibbs	bus_dma_segment_t	dm_segments[dmat->nsegments];
38932516Sgibbs#else
39032516Sgibbs	bus_dma_segment_t	dm_segments[BUS_DMAMAP_NSEGS];
39132516Sgibbs#endif
39232516Sgibbs	bus_dma_segment_t      *sg;
39332516Sgibbs	int			seg;
39432516Sgibbs	int			error;
39548449Smjacob	vm_offset_t		nextpaddr;
39632516Sgibbs
39740029Sgibbs	if (map == NULL)
39840029Sgibbs		map = &nobounce_dmamap;
39940029Sgibbs
40032516Sgibbs	error = 0;
40132516Sgibbs	/*
40232516Sgibbs	 * If we are being called during a callback, pagesneeded will
40332516Sgibbs	 * be non-zero, so we can avoid doing the work twice.
40432516Sgibbs	 */
40532516Sgibbs	if (dmat->lowaddr < ptoa(Maxmem) && map->pagesneeded == 0) {
40632516Sgibbs		vm_offset_t	vendaddr;
40732516Sgibbs
40832516Sgibbs		/*
40932516Sgibbs		 * Count the number of bounce pages
41032516Sgibbs		 * needed in order to complete this transfer
41132516Sgibbs		 */
41240286Sdg		vaddr = trunc_page((vm_offset_t)buf);
41332516Sgibbs		vendaddr = (vm_offset_t)buf + buflen;
41432516Sgibbs
41532516Sgibbs		while (vaddr < vendaddr) {
41632516Sgibbs			paddr = pmap_kextract(vaddr);
41732516Sgibbs			if (run_filter(dmat, paddr) != 0) {
41832516Sgibbs
41932516Sgibbs				map->pagesneeded++;
42032516Sgibbs			}
42132516Sgibbs			vaddr += PAGE_SIZE;
42232516Sgibbs		}
42332516Sgibbs	}
42432516Sgibbs
42532516Sgibbs	/* Reserve Necessary Bounce Pages */
42632516Sgibbs	if (map->pagesneeded != 0) {
42732516Sgibbs		int s;
42832516Sgibbs
42932516Sgibbs		s = splhigh();
43032516Sgibbs	 	if (reserve_bounce_pages(dmat, map) != 0) {
43132516Sgibbs
43232516Sgibbs			/* Queue us for resources */
43332516Sgibbs			map->dmat = dmat;
43432516Sgibbs			map->buf = buf;
43532516Sgibbs			map->buflen = buflen;
43632516Sgibbs			map->callback = callback;
43732516Sgibbs			map->callback_arg = callback_arg;
43832516Sgibbs
43932516Sgibbs			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
44032516Sgibbs			splx(s);
44132516Sgibbs
44232516Sgibbs			return (EINPROGRESS);
44332516Sgibbs		}
44432516Sgibbs		splx(s);
44532516Sgibbs	}
44632516Sgibbs
44732516Sgibbs	vaddr = (vm_offset_t)buf;
44832516Sgibbs	sg = &dm_segments[0];
44932516Sgibbs	seg = 1;
45032516Sgibbs	sg->ds_len = 0;
45132516Sgibbs
45248449Smjacob	nextpaddr = 0;
45348449Smjacob	do {
45448449Smjacob		bus_size_t	size;
45532516Sgibbs
45648449Smjacob		paddr = pmap_kextract(vaddr);
45748449Smjacob		size = PAGE_SIZE - (paddr & PAGE_MASK);
45848449Smjacob		if (size > buflen)
45948449Smjacob			size = buflen;
46032516Sgibbs
46148449Smjacob		if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
46248449Smjacob			paddr = add_bounce_page(dmat, map, vaddr, size);
46348449Smjacob		}
46432516Sgibbs
46548449Smjacob		if (sg->ds_len == 0) {
46648449Smjacob			sg->ds_addr = paddr;
46748449Smjacob			sg->ds_len = size;
46848449Smjacob		} else if (paddr == nextpaddr) {
46948449Smjacob			sg->ds_len += size;
47048449Smjacob		} else {
47148449Smjacob			/* Go to the next segment */
47248449Smjacob			sg++;
47348449Smjacob			seg++;
47448449Smjacob			if (seg > dmat->nsegments)
47548449Smjacob				break;
47648449Smjacob			sg->ds_addr = paddr;
47748449Smjacob			sg->ds_len = size;
47848449Smjacob		}
47948449Smjacob		vaddr += size;
48048449Smjacob		nextpaddr = paddr + size;
48148449Smjacob		buflen -= size;
48232516Sgibbs
48348449Smjacob	} while (buflen > 0);
48441764Sdillon
48532516Sgibbs	if (buflen != 0) {
48637555Sbde		printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
48739755Sbde		       (u_long)buflen);
48832516Sgibbs		error = EFBIG;
48932516Sgibbs	}
49032516Sgibbs
49132516Sgibbs	(*callback)(callback_arg, dm_segments, seg, error);
49232516Sgibbs
49332516Sgibbs	return (0);
49432516Sgibbs}
49532516Sgibbs
49632516Sgibbs/*
49732516Sgibbs * Release the mapping held by map.
49832516Sgibbs */
49932516Sgibbsvoid
50032516Sgibbs_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
50132516Sgibbs{
50232516Sgibbs	struct bounce_page *bpage;
50332516Sgibbs
50432516Sgibbs	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
50532516Sgibbs		STAILQ_REMOVE_HEAD(&map->bpages, links);
50632516Sgibbs		free_bounce_page(dmat, bpage);
50732516Sgibbs	}
50832516Sgibbs}
50932516Sgibbs
51032516Sgibbsvoid
51132516Sgibbs_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
51232516Sgibbs{
51332516Sgibbs	struct bounce_page *bpage;
51432516Sgibbs
51532516Sgibbs	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
51632516Sgibbs
51732516Sgibbs		/*
51832516Sgibbs		 * Handle data bouncing.  We might also
51932516Sgibbs		 * want to add support for invalidating
52032516Sgibbs		 * the caches on broken hardware
52132516Sgibbs		 */
52232516Sgibbs		switch (op) {
52332516Sgibbs		case BUS_DMASYNC_PREWRITE:
52432516Sgibbs			while (bpage != NULL) {
52532516Sgibbs				bcopy((void *)bpage->datavaddr,
52632516Sgibbs				      (void *)bpage->vaddr,
52732516Sgibbs				      bpage->datacount);
52832516Sgibbs				bpage = STAILQ_NEXT(bpage, links);
52932516Sgibbs			}
53032516Sgibbs			break;
53132516Sgibbs
53232516Sgibbs		case BUS_DMASYNC_POSTREAD:
53332516Sgibbs			while (bpage != NULL) {
53432516Sgibbs				bcopy((void *)bpage->vaddr,
53532516Sgibbs				      (void *)bpage->datavaddr,
53632516Sgibbs				      bpage->datacount);
53732516Sgibbs				bpage = STAILQ_NEXT(bpage, links);
53832516Sgibbs			}
53932516Sgibbs			break;
54032516Sgibbs		case BUS_DMASYNC_PREREAD:
54132516Sgibbs		case BUS_DMASYNC_POSTWRITE:
54232516Sgibbs			/* No-ops */
54332516Sgibbs			break;
54432516Sgibbs		}
54532516Sgibbs	}
54632516Sgibbs}
54732516Sgibbs
54832516Sgibbsstatic int
54932516Sgibbsalloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
55032516Sgibbs{
55132516Sgibbs	int count;
55232516Sgibbs
55332516Sgibbs	count = 0;
55432516Sgibbs	if (total_bpages == 0) {
55532516Sgibbs		STAILQ_INIT(&bounce_page_list);
55632516Sgibbs		STAILQ_INIT(&bounce_map_waitinglist);
55732516Sgibbs		STAILQ_INIT(&bounce_map_callbacklist);
55832516Sgibbs	}
55932516Sgibbs
56032516Sgibbs	while (numpages > 0) {
56132516Sgibbs		struct bounce_page *bpage;
56232516Sgibbs		int s;
56332516Sgibbs
56432516Sgibbs		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
56569781Sdwmalone						     M_NOWAIT | M_ZERO);
56632516Sgibbs
56732516Sgibbs		if (bpage == NULL)
56832516Sgibbs			break;
56932516Sgibbs		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
57032516Sgibbs							 M_NOWAIT, 0ul,
57132516Sgibbs							 dmat->lowaddr,
57235767Sgibbs							 PAGE_SIZE,
57335767Sgibbs							 0);
57432516Sgibbs		if (bpage->vaddr == NULL) {
57532516Sgibbs			free(bpage, M_DEVBUF);
57632516Sgibbs			break;
57732516Sgibbs		}
57832516Sgibbs		bpage->busaddr = pmap_kextract(bpage->vaddr);
57932516Sgibbs		s = splhigh();
58032516Sgibbs		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
58132516Sgibbs		total_bpages++;
58232516Sgibbs		free_bpages++;
58332516Sgibbs		splx(s);
58432516Sgibbs		count++;
58532516Sgibbs		numpages--;
58632516Sgibbs	}
58732516Sgibbs	return (count);
58832516Sgibbs}
58932516Sgibbs
59032516Sgibbsstatic int
59132516Sgibbsreserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
59232516Sgibbs{
59332516Sgibbs	int pages;
59432516Sgibbs
59532516Sgibbs	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
59632516Sgibbs	free_bpages -= pages;
59732516Sgibbs	reserved_bpages += pages;
59832516Sgibbs	map->pagesreserved += pages;
59932516Sgibbs	pages = map->pagesneeded - map->pagesreserved;
60032516Sgibbs
60132516Sgibbs	return (pages);
60232516Sgibbs}
60332516Sgibbs
60432516Sgibbsstatic vm_offset_t
60532516Sgibbsadd_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
60632516Sgibbs		bus_size_t size)
60732516Sgibbs{
60832516Sgibbs	int s;
60932516Sgibbs	struct bounce_page *bpage;
61032516Sgibbs
61132516Sgibbs	if (map->pagesneeded == 0)
61232516Sgibbs		panic("add_bounce_page: map doesn't need any pages");
61332516Sgibbs	map->pagesneeded--;
61432516Sgibbs
61532516Sgibbs	if (map->pagesreserved == 0)
61632516Sgibbs		panic("add_bounce_page: map doesn't need any pages");
61732516Sgibbs	map->pagesreserved--;
61832516Sgibbs
61932516Sgibbs	s = splhigh();
62032516Sgibbs	bpage = STAILQ_FIRST(&bounce_page_list);
62132516Sgibbs	if (bpage == NULL)
62232516Sgibbs		panic("add_bounce_page: free page list is empty");
62332516Sgibbs
62432516Sgibbs	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
62532516Sgibbs	reserved_bpages--;
62632516Sgibbs	active_bpages++;
62732516Sgibbs	splx(s);
62832516Sgibbs
62932516Sgibbs	bpage->datavaddr = vaddr;
63032516Sgibbs	bpage->datacount = size;
63132516Sgibbs	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
63232516Sgibbs	return (bpage->busaddr);
63332516Sgibbs}
63432516Sgibbs
63532516Sgibbsstatic void
63632516Sgibbsfree_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
63732516Sgibbs{
63832516Sgibbs	int s;
63932516Sgibbs	struct bus_dmamap *map;
64032516Sgibbs
64132516Sgibbs	bpage->datavaddr = 0;
64232516Sgibbs	bpage->datacount = 0;
64332516Sgibbs
64432516Sgibbs	s = splhigh();
64532516Sgibbs	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
64632516Sgibbs	free_bpages++;
64732516Sgibbs	active_bpages--;
64832516Sgibbs	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
64932516Sgibbs		if (reserve_bounce_pages(map->dmat, map) == 0) {
65032516Sgibbs			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
65132516Sgibbs			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
65232516Sgibbs					   map, links);
65332516Sgibbs			busdma_swi_pending = 1;
65488900Sjhb			swi_sched(vm_ih, 0);
65532516Sgibbs		}
65632516Sgibbs	}
65732516Sgibbs	splx(s);
65832516Sgibbs}
65932516Sgibbs
66032516Sgibbsvoid
66195076Salfredbusdma_swi(void)
66232516Sgibbs{
66332516Sgibbs	int s;
66432516Sgibbs	struct bus_dmamap *map;
66532516Sgibbs
66632516Sgibbs	s = splhigh();
66732516Sgibbs	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
66832516Sgibbs		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
66932516Sgibbs		splx(s);
67032516Sgibbs		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
67132516Sgibbs				map->callback, map->callback_arg, /*flags*/0);
67232516Sgibbs		s = splhigh();
67332516Sgibbs	}
67432516Sgibbs	splx(s);
67532516Sgibbs}
676