/*
 * Copyright (c) 1997 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      $Id: busdma_machdep.c,v 1.5 1998/04/17 22:36:26 des Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_prot.h>
#include <vm/vm_page.h>

#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX(a,b) (((a) > (b)) ? (a) : (b))
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
#define MAX_BPAGES 128

struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  alignment;
	bus_size_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	u_int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;
	int		  map_count;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

struct bus_dmamap {
	struct bp_list	       bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	void		      *buf;		/* unmapped buffer pointer */
	bus_size_t	       buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map);
static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		if (paddr > dmat->lowaddr
		 && paddr <= dmat->highaddr
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page(lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page(highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		/*
		 * XXX Not really correct??? Probably need to honor boundary
		 *     all the way up the inheritance chain.
		 */
		newtag->boundary = MAX(parent->boundary, newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL) {
			parent->ref_count++;
		}
	}

	if (newtag->lowaddr < ptoa(Maxmem) && (flags & BUS_DMA_ALLOCNOW) != 0) {
		/* Must bounce */

		if (lowaddr > bounce_lowaddr) {
			/*
			 * Go through the pool and kill any pages
			 * that don't reside below lowaddr.
			 */
			panic("bus_dma_tag_create: page reallocation "
			      "not implemented");
		}
		if (ptoa(total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			dmat->ref_count--;
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
			}
			dmat = parent;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->lowaddr < ptoa(Maxmem)) {
		/* Must bounce */
		int maxpages;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT);
		if (*mapp == NULL) {
			return (ENOMEM);
		} else {
			/* Initialize the new map */
			bzero(*mapp, sizeof(**mapp));
			STAILQ_INIT(&((*mapp)->bpages));
		}
		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0
		  && total_bpages < maxpages)) {
			int pages;

			if (dmat->lowaddr > bounce_lowaddr) {
				/*
				 * Go through the pool and kill any pages
				 * that don't reside below lowaddr.
				 */
				panic("bus_dmamap_create: page reallocation "
				      "not implemented");
			}
			pages = atop(dmat->maxsize);
			pages = MIN(maxpages - total_bpages, pages);
			error = alloc_bounce_pages(dmat, pages);

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = &nobounce_dmamap;
	}
	if (error == 0)
		dmat->map_count++;
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}


/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	/* If we succeed, no mapping/bouncing will be required */
	*mapp = &nobounce_dmamap;

	if ((dmat->maxsize <= PAGE_SIZE) && dmat->lowaddr >= ptoa(Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF,
				(flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF,
				      (flags & BUS_DMA_NOWAIT)
				      ? M_NOWAIT : M_WAITOK,
				      0ul, dmat->lowaddr, 1ul, dmat->boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}
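
/*
 * Example (for illustration only; names are hypothetical): a driver that
 * needs a DMA-visible control block obtains it through the tag that
 * describes the block's constraints:
 *
 *	struct ctrl_block *cb;
 *	bus_dmamap_t cb_map;
 *
 *	if (bus_dmamem_alloc(ctrl_tag, (void **)&cb, BUS_DMA_NOWAIT,
 *			     &cb_map) != 0)
 *		return (ENOMEM);
 *
 * Note that the returned map is the static nobounce map, since memory
 * allocated here never needs to be bounced.
 */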

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	free(vaddr, M_DEVBUF);
}

#define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	vm_offset_t		vaddr;
	vm_offset_t		paddr;
#ifdef __GNUC__
	bus_dma_segment_t	dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t	dm_segments[BUS_DMAMAP_NSEGS];
#endif
	bus_dma_segment_t      *sg;
	int			seg;
	int			error;

	error = 0;
	/*
	 * If we are being called during a callback, pagesneeded will
	 * be non-zero, so we can avoid doing the work twice.
	 */
	if (dmat->lowaddr < ptoa(Maxmem) && map->pagesneeded == 0) {
		vm_offset_t	vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page(buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0) {

				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		int s;

		s = splhigh();
		if (reserve_bounce_pages(dmat, map) != 0) {

			/* Queue us for resources */
			map->dmat = dmat;
			map->buf = buf;
			map->buflen = buflen;
			map->callback = callback;
			map->callback_arg = callback_arg;

			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
			splx(s);

			return (EINPROGRESS);
		}
		splx(s);
	}

	vaddr = (vm_offset_t)buf;
	sg = &dm_segments[0];
	seg = 1;
	sg->ds_len = 0;

	do {
		bus_size_t	size;
		vm_offset_t	nextpaddr;	/* GCC warning expected */

		paddr = pmap_kextract(vaddr);
		size = PAGE_SIZE - (paddr & PAGE_MASK);
		if (size > buflen)
			size = buflen;

		if (map->pagesneeded != 0
		 && run_filter(dmat, paddr)) {
			paddr = add_bounce_page(dmat, map, vaddr, size);
		}

		if (sg->ds_len == 0) {
			sg->ds_addr = paddr;
			sg->ds_len = size;
		} else if (paddr == nextpaddr) {
			sg->ds_len += size;
		} else {
			/* Go to the next segment */
			sg++;
			seg++;
			if (seg > dmat->nsegments)
				break;
			sg->ds_addr = paddr;
			sg->ds_len = size;
		}
		vaddr += size;
		nextpaddr = paddr + size;
		buflen -= size;
	} while (buflen > 0);

	if (buflen != 0) {
		printf("bus_dmamap_load: Too many segs! buf_len = 0x%x\n",
		       buflen);
		error = EFBIG;
	}

	(*callback)(callback_arg, dm_segments, seg, error);

	return (0);
}
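
/*
 * Example (for illustration only; the xfer structure and names are
 * hypothetical): a consumer creates a map, loads its buffer and picks up
 * the resulting segment list in the callback:
 *
 *	static void
 *	xfer_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		struct xfer *xp = arg;
 *
 *		if (error == 0) {
 *			xp->phys_addr = segs[0].ds_addr;
 *			xp->phys_len = segs[0].ds_len;
 *		}
 *	}
 *
 *	error = bus_dmamap_create(tag, 0, &map);
 *	if (error == 0)
 *		error = bus_dmamap_load(tag, map, buf, buflen, xfer_cb, xp, 0);
 *
 * A return value of EINPROGRESS means bounce pages were not immediately
 * available; the callback will be invoked later from busdma_swi().
 */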

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {

		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		switch (op) {
		case BUS_DMASYNC_PREWRITE:
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			break;

		case BUS_DMASYNC_POSTREAD:
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			break;
		case BUS_DMASYNC_PREREAD:
		case BUS_DMASYNC_POSTWRITE:
			/* No-ops */
			break;
		}
	}
}
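
/*
 * Example (for illustration only): the sync operations bracket the actual
 * device access.  Using the bus_dmamap_sync()/bus_dmamap_unload() wrappers
 * declared in <machine/bus.h>, a host-to-device (write) transfer on a map
 * loaded as in the example above would look like:
 *
 *	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);
 *	... program the device and wait for the transfer to complete ...
 *	bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(tag, map);
 *
 * PREWRITE copies client data into the bounce pages before the device
 * reads them; POSTREAD copies bounced data back to the client buffer
 * after a device-to-host transfer.
 */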

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	int count;

	count = 0;
	if (total_bpages == 0) {
		STAILQ_INIT(&bounce_page_list);
		STAILQ_INIT(&bounce_map_waitinglist);
		STAILQ_INIT(&bounce_map_callbacklist);
	}

	while (numpages > 0) {
		struct bounce_page *bpage;
		int s;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT);

		if (bpage == NULL)
			break;
		bzero(bpage, sizeof(*bpage));
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 dmat->lowaddr,
							 PAGE_SIZE,
							 0);
		if (bpage->vaddr == NULL) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		s = splhigh();
		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
		total_bpages++;
		free_bpages++;
		splx(s);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	int pages;

	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
	free_bpages -= pages;
	reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static vm_offset_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	int s;
	struct bounce_page *bpage;

	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have any pages reserved");
	map->pagesreserved--;

	s = splhigh();
	bpage = STAILQ_FIRST(&bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
	reserved_bpages--;
	active_bpages++;
	splx(s);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	int s;
	struct bus_dmamap *map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	s = splhigh();
	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			setsoftvm();
		}
	}
	splx(s);
}

void
busdma_swi()
{
	int s;
	struct bus_dmamap *map;

	s = splhigh();
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		splx(s);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		s = splhigh();
	}
	splx(s);
}