busdma_machdep.c revision 113347
/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/i386/i386/busdma_machdep.c 113347 2003-04-10 23:03:33Z mux $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX_BPAGES 512

struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  alignment;
	bus_size_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	u_int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;
	int		  map_count;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

struct bus_dmamap {
	struct bp_list	       bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	void		      *buf;		/* unmapped buffer pointer */
	bus_size_t	       buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
    				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/* To protect all the bounce page related lists and data. */
static struct mtx bounce_lock;

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match; if there is no filter callback then assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		if (paddr > dmat->lowaddr
		 && paddr <= dmat->highaddr
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}
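
/*
 * Example (illustrative only, not compiled): a bus_dma_filter_t that a
 * hypothetical driver could pass to bus_dma_tag_create().  run_filter()
 * above invokes it for pages between the tag's lowaddr and highaddr; a
 * non-zero return means the page must be bounced.  The 16MB hole below
 * is a made-up constraint, not something this file requires.
 */
#if 0
static int
example_dma_filter(void *arg, bus_addr_t paddr)
{

	/* Bounce any page falling inside a (hypothetical) 16MB hole. */
	return (paddr >= 0x1000000 && paddr < 0x2000000);
}
#endif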

#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		/*
		 * XXX Not really correct??? Probably need to honor boundary
		 *     all the way up the inheritance chain.
		 */
		newtag->boundary = MAX(parent->boundary, newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		/* Must bounce */

		if (lowaddr > bounce_lowaddr) {
			/*
			 * Go through the pool and kill any pages
			 * that don't reside below lowaddr.
			 */
			panic("bus_dma_tag_create: page reallocation "
			      "not implemented");
		}
		if (ptoa(total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}
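
/*
 * Example (illustrative only, not compiled): how a hypothetical driver
 * for an ISA-style device limited to 24-bit DMA addresses might call
 * bus_dma_tag_create() above.  The softc field and size limits are
 * assumptions made up for this sketch.
 */
#if 0
	error = bus_dma_tag_create(/*parent*/NULL, /*alignment*/1,
				   /*boundary*/0,
				   /*lowaddr*/BUS_SPACE_MAXADDR_24BIT,
				   /*highaddr*/BUS_SPACE_MAXADDR,
				   /*filter*/NULL, /*filterarg*/NULL,
				   /*maxsize*/65536, /*nsegments*/1,
				   /*maxsegsz*/65536, /*flags*/0,
				   &sc->dma_tag);
	if (error != 0)
		return (error);
#endif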

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)) {
		/* Must bounce */
		int maxpages;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT | M_ZERO);
		if (*mapp == NULL)
			return (ENOMEM);

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0
		  && total_bpages < maxpages)) {
			int pages;

			if (dmat->lowaddr > bounce_lowaddr) {
				/*
				 * Go through the pool and kill any pages
				 * that don't reside below lowaddr.
				 */
				panic("bus_dmamap_create: page reallocation "
				      "not implemented");
			}
			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - total_bpages, pages);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}


/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc_size(bus_dma_tag_t dmat, void** vaddr, int flags,
		      bus_dmamap_t *mapp, bus_size_t size)
{

	if (size > dmat->maxsize)
		return (ENOMEM);

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if ((size <= PAGE_SIZE) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
		*vaddr = malloc(size, M_DEVBUF,
				(flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		mtx_lock(&Giant);
		*vaddr = contigmalloc(size, M_DEVBUF,
		    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK,
		    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
		    dmat->boundary);
		mtx_unlock(&Giant);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}

int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	return (bus_dmamem_alloc_size(dmat, vaddr, flags, mapp, dmat->maxsize));
}

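/*
 * Example (illustrative only, not compiled): allocating a DMA-safe
 * buffer with bus_dmamem_alloc() above and releasing it again with
 * bus_dmamem_free() below.  sc->dma_tag, sc->ring, and sc->dma_map are
 * assumed fields of a hypothetical softc.
 */
#if 0
	error = bus_dmamem_alloc(sc->dma_tag, (void **)&sc->ring,
				 BUS_DMA_NOWAIT, &sc->dma_map);
	if (error != 0)
		return (error);
	/* ... use the buffer ... */
	bus_dmamem_free(sc->dma_tag, sc->ring, sc->dma_map);
#endif
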
/*
 * Free a piece of memory and its associated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free_size(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map,
		     bus_size_t size)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if ((size <= PAGE_SIZE) && dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
		free(vaddr, M_DEVBUF);
	else {
		mtx_lock(&Giant);
		contigfree(vaddr, size, M_DEVBUF);
		mtx_unlock(&Giant);
	}
}

void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	bus_dmamem_free_size(dmat, vaddr, map, dmat->maxsize);
}

#define BUS_DMAMAP_NSEGS ((64 * 1024) / PAGE_SIZE + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	vm_offset_t		vaddr;
	vm_paddr_t		paddr;
#ifdef __GNUC__
	bus_dma_segment_t	dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t	dm_segments[BUS_DMAMAP_NSEGS];
#endif
	bus_dma_segment_t      *sg;
	int			seg;
	int			error;
	vm_paddr_t		nextpaddr;

	if (map == NULL)
		map = &nobounce_dmamap;

	error = 0;
	/*
	 * If we are being called during a callback, pagesneeded will
	 * be non-zero, so we can avoid doing the work twice.
	 */
	if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem) &&
	    map->pagesneeded == 0) {
		vm_offset_t	vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0) {

				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (reserve_bounce_pages(dmat, map, 1) != 0) {

			/* Queue us for resources */
			map->dmat = dmat;
			map->buf = buf;
			map->buflen = buflen;
			map->callback = callback;
			map->callback_arg = callback_arg;

			STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links);
			mtx_unlock(&bounce_lock);
			return (EINPROGRESS);
		}
		mtx_unlock(&bounce_lock);
	}

	vaddr = (vm_offset_t)buf;
	sg = &dm_segments[0];
	seg = 1;
	sg->ds_len = 0;

	nextpaddr = 0;
	do {
		bus_size_t	size;

		paddr = pmap_kextract(vaddr);
		size = PAGE_SIZE - (paddr & PAGE_MASK);
		if (size > buflen)
			size = buflen;

		if (map->pagesneeded != 0 && run_filter(dmat, paddr)) {
			paddr = add_bounce_page(dmat, map, vaddr, size);
		}

		if (sg->ds_len == 0) {
			sg->ds_addr = paddr;
			sg->ds_len = size;
		} else if (paddr == nextpaddr) {
			sg->ds_len += size;
		} else {
			/* Go to the next segment */
			sg++;
			seg++;
			if (seg > dmat->nsegments)
				break;
			sg->ds_addr = paddr;
			sg->ds_len = size;
		}
		vaddr += size;
		nextpaddr = paddr + size;
		buflen -= size;

	} while (buflen > 0);

	if (buflen != 0) {
		printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
		       (u_long)buflen);
		error = EFBIG;
	}

	(*callback)(callback_arg, dm_segments, seg, error);

	return (0);
}

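/*
 * Example (illustrative only, not compiled): a typical caller of
 * bus_dmamap_load() above.  The callback simply records the single
 * segment's bus address; struct example_softc and its fields are
 * assumptions made up for this sketch.
 */
#if 0
static void
example_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct example_softc *sc = arg;

	if (error != 0)
		return;
	sc->ring_busaddr = segs[0].ds_addr;
}

	/* ... in the driver, after bus_dmamem_alloc(): ... */
	error = bus_dmamap_load(sc->dma_tag, sc->dma_map, sc->ring,
				sc->ring_size, example_load_cb, sc,
				/*flags*/0);
	if (error == EINPROGRESS)
		return (0);	/* callback runs when bounce pages free up */
#endif
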
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
    			bus_dmamap_t map,
			bus_dma_segment_t segs[],
			void *buf, bus_size_t buflen,
			struct thread *td,
			int flags,
			bus_addr_t *lastaddrp,
			int *segp,
			int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr;
	bus_addr_t paddr;
	int needbounce = 0;
	int seg;
	pmap_t pmap;

	if (map == NULL)
		map = &nobounce_dmamap;

	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = NULL;

	if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)) {
		vm_offset_t	vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0) {
				needbounce = 1;
				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
	}

	vaddr = (vm_offset_t)buf;

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (reserve_bounce_pages(dmat, map, 0) != 0) {
			mtx_unlock(&bounce_lock);
			return (ENOMEM);
		}
		mtx_unlock(&bounce_lock);
	}

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		if (map->pagesneeded != 0 && run_filter(dmat, curaddr))
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (needbounce == 0 && curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, error;

	KASSERT(m0->m_flags & M_PKTHDR,
		("bus_dmamap_load_mbuf: no packet header"));

	nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
						dm_segments,
						m->m_data, m->m_len,
						NULL, flags, &lastaddr,
						&nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments,
			    nsegs+1, m0->m_pkthdr.len, error);
	}
	return (error);
}

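/*
 * Example (illustrative only, not compiled): loading an outgoing packet
 * chain with bus_dmamap_load_mbuf() above.  Note the two-stage callback
 * type (bus_dmamap_callback2_t) also receives the total mapped length.
 * example_softc and example_post_tx_desc() are made-up names for this
 * sketch.
 */
#if 0
static void
example_txmap_cb(void *arg, bus_dma_segment_t *segs, int nseg,
		 bus_size_t mapsize, int error)
{
	struct example_softc *sc = arg;
	int i;

	if (error != 0)
		return;
	/* example_post_tx_desc() is a hypothetical helper. */
	for (i = 0; i < nseg; i++)
		example_post_tx_desc(sc, segs[i].ds_addr, segs[i].ds_len);
}

	/* ... in the driver's transmit path: ... */
	error = bus_dmamap_load_mbuf(sc->dma_tag, sc->tx_map, m0,
				     example_txmap_cb, sc, /*flags*/0);
#endif
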
/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	bus_addr_t lastaddr;
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	struct thread *td = NULL;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		td = uio->uio_td;
		KASSERT(td != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
	}

	nsegs = 0;
	error = 0;
	first = 1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map,
					dm_segments,
					addr, minlen,
					td, flags, &lastaddr, &nsegs, first);
			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments,
			    nsegs+1, uio->uio_resid, error);
	}
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, int op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}
	}
}

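/*
 * Example (illustrative only, not compiled): the bracketing sync calls
 * a driver issues around a device-bound DMA transfer.  On this
 * implementation only bounce copying is performed, but portable drivers
 * must make the calls regardless.  sc->dma_tag and sc->dma_map are
 * assumed fields of a hypothetical softc.
 */
#if 0
	/* Copy CPU writes into the bounce pages before the device reads. */
	bus_dmamap_sync(sc->dma_tag, sc->dma_map, BUS_DMASYNC_PREWRITE);
	/* ... start the DMA transfer and wait for completion ... */
	bus_dmamap_sync(sc->dma_tag, sc->dma_map, BUS_DMASYNC_POSTWRITE);
#endif
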
static void
init_bounce_pages(void *dummy __unused)
{

	free_bpages = 0;
	reserved_bpages = 0;
	active_bpages = 0;
	total_bpages = 0;
	STAILQ_INIT(&bounce_page_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	int count;

	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		mtx_lock(&Giant);
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 dmat->lowaddr,
							 PAGE_SIZE,
							 0);
		mtx_unlock(&Giant);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
		total_bpages++;
		free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	free_bpages -= pages;
	reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_page *bpage;

	KASSERT(map != NULL && map != &nobounce_dmamap,
	    ("add_bounce_page: bad map %p", map));

	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have any pages reserved");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
	reserved_bpages--;
	active_bpages++;
	mtx_unlock(&bounce_lock);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}
958