/*
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/i386/i386/busdma_machdep.c 115316 2003-05-26 04:00:52Z scottl $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX_BPAGES 512

struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  alignment;
	bus_size_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	u_int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;
	int		  map_count;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

static STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
static int free_bpages;
static int reserved_bpages;
static int active_bpages;
static int total_bpages;
static bus_addr_t bounce_lowaddr = BUS_SPACE_MAXADDR;

struct bus_dmamap {
	struct bp_list	       bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	void		      *buf;		/* unmapped buffer pointer */
	bus_size_t	       buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/* To protect all the bounce-page-related lists and data. */
static struct mtx bounce_lock;

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag, call the filter callback
 * to check for a match; if there is no filter callback, assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;
	do {
		if (paddr > dmat->lowaddr
		 && paddr <= dmat->highaddr
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		/*
		 * XXX Not really correct??? Probably need to honor boundary
		 *     all the way up the inheritance chain.
		 */
		newtag->boundary = MAX(parent->boundary, newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		/* Must bounce */

		if (lowaddr > bounce_lowaddr) {
			/*
			 * Go through the pool and kill any pages
			 * that don't reside below lowaddr.
			 */
			panic("bus_dma_tag_create: page reallocation "
			      "not implemented");
		}
		if (ptoa(total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	return (error);
}
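
/*
 * Illustrative sketch (not part of the original file): how a driver might
 * create a tag with this interface for a device that can only address the
 * low 16MB of memory, e.g. an ISA busmaster.  The "sc" softc and its
 * fields are assumptions made for the example.
 *
 *	error = bus_dma_tag_create(NULL,			// parent
 *				   1,				// alignment
 *				   0,				// boundary
 *				   BUS_SPACE_MAXADDR_24BIT,	// lowaddr
 *				   BUS_SPACE_MAXADDR,		// highaddr
 *				   NULL, NULL,			// filter, arg
 *				   DFLTPHYS,			// maxsize
 *				   1,				// nsegments
 *				   DFLTPHYS,			// maxsegsz
 *				   0,				// flags
 *				   &sc->dmat);
 *	if (error != 0)
 *		return (error);
 *
 * Any physical page above lowaddr is then bounced through pages that the
 * code above allocates below it.
 */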

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)) {
		/* Must bounce */
		int maxpages;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT | M_ZERO);
		if (*mapp == NULL)
			return (ENOMEM);

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0
		  && total_bpages < maxpages)) {
			int pages;

			if (dmat->lowaddr > bounce_lowaddr) {
				/*
				 * Go through the pool and kill any pages
				 * that don't reside below lowaddr.
				 */
				panic("bus_dmamap_create: page reallocation "
				      "not implemented");
			}
			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - total_bpages, pages);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL) {
		if (STAILQ_FIRST(&map->bpages) != NULL)
			return (EBUSY);
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	return (0);
}


/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if ((dmat->maxsize <= PAGE_SIZE) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF,
				(flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		mtx_lock(&Giant);
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF,
		    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK,
		    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
		    dmat->boundary);
		mtx_unlock(&Giant);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	return (0);
}

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if ((dmat->maxsize <= PAGE_SIZE)
	 && dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
		free(vaddr, M_DEVBUF);
	else {
		mtx_lock(&Giant);
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
		mtx_unlock(&Giant);
	}
}
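
/*
 * Illustrative sketch (assumed names): pairing bus_dmamem_alloc() and
 * bus_dmamem_free().  Note that the map returned here is NULL by design
 * and must be passed back unchanged:
 *
 *	error = bus_dmamem_alloc(sc->dmat, (void **)&sc->ring,
 *				 BUS_DMA_NOWAIT, &sc->ring_map);
 *	if (error != 0)
 *		return (error);
 *	...
 *	bus_dmamem_free(sc->dmat, sc->ring, sc->ring_map);
 */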

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			bus_dma_segment_t segs[],
			void *buf, bus_size_t buflen,
			struct thread *td,
			int flags,
			bus_addr_t *lastaddrp,
			int *segp,
			int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr;
	bus_addr_t paddr;
	int needbounce = 0;
	int seg;
	pmap_t pmap;

	if (map == NULL)
		map = &nobounce_dmamap;

	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = NULL;

	if (dmat->lowaddr < ptoa((vm_paddr_t)Maxmem)) {
		vm_offset_t	vendaddr;

		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0) {
				needbounce = 1;
				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
	}

	vaddr = (vm_offset_t)buf;

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
								map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		if (map->pagesneeded != 0 && run_filter(dmat, curaddr))
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (needbounce == 0 && curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

#define BUS_DMAMAP_NSEGS ((64 * 1024) / PAGE_SIZE + 1)

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
#ifdef __GNUC__
	bus_dma_segment_t	dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t	dm_segments[BUS_DMAMAP_NSEGS];
#endif
	bus_addr_t		lastaddr = 0;
	int			error, nsegs = 0;

	if (map != NULL) {
		flags |= BUS_DMA_WAITOK;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	error = _bus_dmamap_load_buffer(dmat, map, dm_segments, buf, buflen,
	    NULL, flags, &lastaddr, &nsegs, 1);

	if (error == EINPROGRESS)
		return (error);

	if (error)
		(*callback)(callback_arg, dm_segments, 0, error);
	else
		(*callback)(callback_arg, dm_segments, nsegs + 1, 0);

	return (0);
}
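
/*
 * Illustrative sketch (assumed names): a minimal callback and call site
 * for bus_dmamap_load().  The load may complete asynchronously: an
 * EINPROGRESS load is reported to the caller as success and the callback
 * runs later from busdma_swi() once bounce pages free up.
 *
 *	static void
 *	foo_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		if (error != 0)
 *			return;
 *		sc->ring_busaddr = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_load(sc->dmat, sc->ring_map, sc->ring,
 *				sc->ring_size, foo_load_cb, sc, 0);
 */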


/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, error;

	KASSERT(m0->m_flags & M_PKTHDR,
		("bus_dmamap_load_mbuf: no packet header"));

	flags |= BUS_DMA_NOWAIT;
	nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
						dm_segments,
						m->m_data, m->m_len,
						NULL, flags, &lastaddr,
						&nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments,
			    nsegs+1, m0->m_pkthdr.len, error);
	}
	return (error);
}
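
/*
 * Illustrative sketch (assumed names): loading a transmit mbuf chain.
 * The callback2 variant also receives the total mapped length:
 *
 *	static void
 *	foo_tx_cb(void *arg, bus_dma_segment_t *segs, int nseg,
 *		  bus_size_t mapsize, int error)
 *	{
 *		struct foo_softc *sc = arg;
 *		...
 *	}
 *
 *	error = bus_dmamap_load_mbuf(sc->dmat, sc->tx_map, m0,
 *				     foo_tx_cb, sc, 0);
 */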

/*
 * Like bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	bus_addr_t lastaddr = 0;
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	struct thread *td = NULL;

	flags |= BUS_DMA_NOWAIT;
	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		td = uio->uio_td;
		KASSERT(td != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
	}

	nsegs = 0;
	error = 0;
	first = 1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map,
					dm_segments,
					addr, minlen,
					td, flags, &lastaddr, &nsegs, first);
			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments,
			    nsegs+1, uio->uio_resid, error);
	}
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, int op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}
	}
}
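
/*
 * Illustrative sketch: how a driver brackets a transfer with sync calls
 * so that the bounce copies above actually happen.  For a host-to-device
 * transfer:
 *
 *	bus_dmamap_sync(sc->dmat, sc->map, BUS_DMASYNC_PREWRITE);
 *	... start the DMA and wait for completion ...
 *	bus_dmamap_sync(sc->dmat, sc->map, BUS_DMASYNC_POSTWRITE);
 *
 * For a device-to-host transfer, use BUS_DMASYNC_PREREAD before starting
 * the DMA and BUS_DMASYNC_POSTREAD afterwards; POSTREAD is what copies
 * bounced data back into the client buffer.
 */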

static void
init_bounce_pages(void *dummy __unused)
{

	free_bpages = 0;
	reserved_bpages = 0;
	active_bpages = 0;
	total_bpages = 0;
	STAILQ_INIT(&bounce_page_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	int count;

	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		mtx_lock(&Giant);
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 dmat->lowaddr,
							 PAGE_SIZE,
							 0);
		mtx_unlock(&Giant);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links);
		total_bpages++;
		free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	pages = MIN(free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	free_bpages -= pages;
	reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_page *bpage;

	KASSERT(map != NULL && map != &nobounce_dmamap,
	    ("add_bounce_page: bad map %p", map));

	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have any pages reserved");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bounce_page_list, links);
	reserved_bpages--;
	active_bpages++;
	mtx_unlock(&bounce_lock);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;

	bpage->datavaddr = 0;
	bpage->datacount = 0;

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links);
	free_bpages++;
	active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}