busdma_machdep.c revision 35256
/*
 * Copyright (c) 1997 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      $Id: busdma_machdep.c,v 1.4 1998/02/20 13:11:47 bde Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_prot.h>
#include <vm/vm_page.h>

#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX(a,b) (((a) > (b)) ? (a) : (b))
#define MIN(a,b) (((a) < (b)) ? (a) : (b))
#define MAX_BPAGES 128

struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;
	int		  map_count;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

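/*
 * Global pool of bounce pages and the counters used to track how many
 * are free, reserved for maps awaiting pages, and currently in use.
 */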
static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

struct bus_dmamap {
	struct bp_list	       bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	void		      *buf;		/* unmapped buffer pointer */
	bus_size_t	       buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
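/* Shared dummy map used when a tag never requires bouncing. */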
static struct bus_dmamap nobounce_dmamap;

static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map);
static vm_offset_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

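/*
 * Walk the tag and its parents and return non-zero if the physical
 * address falls within a tag's (lowaddr, highaddr] window and the tag's
 * filter, if any, also flags it, i.e. the address must be bounced.
 */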
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		if (paddr > dmat->lowaddr
		 && paddr <= dmat->highaddr
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t boundary,
		   bus_addr_t lowaddr, bus_addr_t highaddr,
		   bus_dma_filter_t *filter, void *filterarg,
		   bus_size_t maxsize, int nsegments, bus_size_t maxsegsz,
		   int flags, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page(lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page(highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		/*
		 * XXX Not really correct??? Probably need to honor boundary
		 *     all the way up the inheritance chain.
		 */
		newtag->boundary = MIN(parent->boundary, newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL) {
			parent->ref_count++;
		}
	}

	if (newtag->lowaddr < ptoa(Maxmem)) {
		/* Must bounce */

		if (lowaddr > bounce_lowaddr) {
			/*
			 * Go through the pool and kill any pages
			 * that don't reside below lowaddr.
			 */
			panic("bus_dma_tag_create: page reallocation "
			      "not implemented");
		}
		if (ptoa(total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}

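/*
 * Release a reference on a dma_tag, walking up the parent chain and
 * freeing each tag whose reference count drops to zero.  Returns EBUSY
 * if maps are still allocated against the tag.
 */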
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			dmat->ref_count--;
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
			}
			dmat = parent;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->lowaddr < ptoa(Maxmem)) {
		/* Must bounce */
		int maxpages;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT);
		if (*mapp == NULL) {
			error = ENOMEM;
		} else {
			/* Initialize the new map */
			bzero(*mapp, sizeof(**mapp));
			STAILQ_INIT(&((*mapp)->bpages));
		}
		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if (dmat->map_count > 0
		 && total_bpages < maxpages) {
			int pages;

			pages = atop(dmat->maxsize);
			pages = MIN(maxpages - total_bpages, pages);
			alloc_bounce_pages(dmat, pages);
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}

#define BUS_DMAMAP_NSEGS ((BUS_SPACE_MAXSIZE / PAGE_SIZE) + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	vm_offset_t		vaddr;
	vm_offset_t		paddr;
	vm_offset_t		nextpaddr;
#ifdef __GNUC__
	bus_dma_segment_t	dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t	dm_segments[BUS_DMAMAP_NSEGS];
#endif
	bus_dma_segment_t      *sg;
	int			seg;
	int			error;

	error = 0;

	if (map == NULL)
		map = &nobounce_dmamap;

	/*
	 * If we are being called during a callback, pagesneeded will
	 * be non-zero, so we can avoid doing the work twice.
	 */
	if (dmat->lowaddr < ptoa(Maxmem) && map->pagesneeded == 0) {
		vm_offset_t	vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page(buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0) {
				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		int s;

		s = splhigh();
		if (reserve_bounce_pages(dmat, map) != 0) {

			/* Queue us for resources */
			map->dmat = dmat;
			map->buf = buf;
			map->buflen = buflen;
			map->callback = callback;
			map->callback_arg = callback_arg;

			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
			splx(s);

			return (EINPROGRESS);
		}
		splx(s);
	}

	vaddr = (vm_offset_t)buf;
	sg = &dm_segments[0];
	seg = 1;
	sg->ds_len = 0;
	nextpaddr = 0;

	do {
		bus_size_t	size;

		paddr = pmap_kextract(vaddr);
		size = PAGE_SIZE - (paddr & PAGE_MASK);
		if (size > buflen)
			size = buflen;

		if (map->pagesneeded != 0
		 && run_filter(dmat, paddr)) {
			paddr = add_bounce_page(dmat, map, vaddr, size);
		}

		if (sg->ds_len == 0) {
			sg->ds_addr = paddr;
			sg->ds_len = size;
		} else if (paddr == nextpaddr) {
			/* Coalesce with the previous, physically contiguous segment */
			sg->ds_len += size;
		} else {
			/* Go to the next segment */
			sg++;
			seg++;
			if (seg > dmat->nsegments)
				break;
			sg->ds_addr = paddr;
			sg->ds_len = size;
		}
		vaddr += size;
		nextpaddr = paddr + size;
		buflen -= size;
	} while (buflen > 0);

	if (buflen != 0) {
		printf("bus_dmamap_load: Too many segs!\n");
		error = EFBIG;
	}

	(*callback)(callback_arg, dm_segments, seg, error);

	return (0);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

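/*
 * Synchronize any bounce pages with the client buffer: copy the client
 * data into the bounce pages before a write to the device (PREWRITE) and
 * copy it back out after a read (POSTREAD).  PREREAD and POSTWRITE are
 * no-ops.
 */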
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {

		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		switch (op) {
		case BUS_DMASYNC_PREWRITE:
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			break;

		case BUS_DMASYNC_POSTREAD:
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
			break;
		case BUS_DMASYNC_PREREAD:
		case BUS_DMASYNC_POSTWRITE:
			/* No-ops */
			break;
		}
	}
}

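/*
 * Add up to numpages pages to the global bounce pool, allocating each
 * page with contigmalloc() below the tag's lowaddr.  Returns the number
 * of pages actually added.
 */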
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	int count;

	count = 0;
	if (total_bpages == 0) {
		STAILQ_INIT(&bounce_page_list);
		STAILQ_INIT(&bounce_map_waitinglist);
		STAILQ_INIT(&bounce_map_callbacklist);
	}

	while (numpages > 0) {
		struct bounce_page *bpage;
		int s;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT);

		if (bpage == NULL)
			break;
		bzero(bpage, sizeof(*bpage));
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 dmat->lowaddr,
							 PAGE_SIZE, 0x10000);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		s = splhigh();
		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
		total_bpages++;
		free_bpages++;
		splx(s);
		count++;
		numpages--;
	}
	return (count);
}

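/*
 * Reserve as many free bounce pages as possible for the given map and
 * return the number of pages the map still lacks (zero on success).
 */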
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	int pages;

	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
	free_bpages -= pages;
	reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

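/*
 * Consume one of the map's reserved bounce pages to shadow the client
 * data at vaddr and return the bus address of the bounce page.
 */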
static vm_offset_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	int s;
	struct bounce_page *bpage;

	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have any pages reserved");
	map->pagesreserved--;

	s = splhigh();
	bpage = STAILQ_FIRST(&bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
	reserved_bpages--;
	active_bpages++;
	splx(s);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

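/*
 * Return a bounce page to the free list.  If a map is waiting for pages,
 * try to complete its reservation; once fully reserved, move the map to
 * the callback list and schedule the busdma software interrupt.
 */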
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	int s;
	struct bus_dmamap *map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	s = splhigh();
	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			setsoftvm();
		}
	}
	splx(s);
}

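/*
 * Software interrupt handler for deferred maps: re-issue bus_dmamap_load()
 * for each map whose bounce page reservation has now been satisfied.
 */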
void
busdma_swi()
{
	int s;
	struct bus_dmamap *map;

	s = splhigh();
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		splx(s);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		s = splhigh();
	}
	splx(s);
}