/*-
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/i386/busdma_machdep.c 152775 2005-11-24 15:28:32Z le $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX_BPAGES 512

struct bounce_zone;

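/*
 * A dma tag describes the constraints a device places on DMA buffers:
 * addressable window (lowaddr/highaddr plus an optional filter), alignment,
 * boundary, total size and segment limits, and the lock function used to
 * serialize deferred load callbacks.
 */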
struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  alignment;
	bus_size_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	u_int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;
	int		  map_count;
	bus_dma_lock_t	 *lockfunc;
	void		 *lockfuncarg;
	bus_dma_segment_t *segments;
	struct bounce_zone *bounce_zone;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

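/*
 * A bounce zone is a pool of bounce pages shared by every tag whose
 * lowaddr, alignment and boundary constraints it satisfies.  Its usage
 * counters are exported under the hw.busdma.zone%d sysctl node.
 */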
struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
	   "Total bounce pages");

struct bus_dmamap {
	struct bp_list	       bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	void		      *buf;		/* unmapped buffer pointer */
	bus_size_t	       buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
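
/*
 * Dummy map used by _bus_dmamap_load_buffer() when it is handed a NULL
 * map, i.e. for tags whose transfers never require bouncing.
 */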
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);

/*
 * Return true if a match is made.
 *
 * To find a match, walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag, call the filter callback
 * to check for a match; if there is no filter callback, assume a match.
 */
static __inline int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		 || ((paddr & (dmat->alignment - 1)) != 0))
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should pass &Giant as the tag's lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
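
/*
 * Example (hypothetical driver code, not part of this file): a driver that
 * serializes deferred callbacks with its own mutex would pass
 * busdma_lock_mutex and that mutex when creating its tag; sc_mtx, sc_dmat
 * and the constraint values below are illustrative only:
 *
 *	error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
 *	    BUS_SPACE_MAXADDR, NULL, NULL, MAXBSIZE, 1, MAXBSIZE, 0,
 *	    busdma_lock_mutex, &sc->sc_mtx, &sc->sc_dmat);
 */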

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are never meant to be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	panic("driver error: busdma dflt_lock called");
}

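/*
 * Private flags stashed in the bus-reserved part of the tag flags field.
 * BUS_DMA_COULD_BOUNCE marks a tag whose transfers may need bounce pages;
 * BUS_DMA_MIN_ALLOC_COMP records that the tag's initial bounce page
 * allocation has been performed.
 */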
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4
/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Basic sanity checking */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	newtag->segments = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem)
	 || newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, newtag->flags, error);
	if (error != 0)
		free(newtag, M_DEVBUF);
	else
		*dmat = newtag;
	return (error);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t dmat_copy;
	int error;

	error = 0;
	dmat_copy = dmat;

	if (dmat != NULL) {

		if (dmat->map_count != 0) {
			error = EBUSY;
			goto out;
		}

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL)
					free(dmat->segments, M_DEVBUF);
				free(dmat, M_DEVBUF);
				/*
				 * That was the last reference, so
				 * release our reference on our
				 * parent as well.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
out:
	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
	return (error);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}
	}

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0)
				return (error);
		}
		bz = dmat->bounce_zone;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT | M_ZERO);
		if (*mapp == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->alignment > 1)
			maxpages = MAX_BPAGES;
		else
			maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL && map != &nobounce_dmamap) {
		if (STAILQ_FIRST(&map->bpages) != NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, EBUSY);
			return (EBUSY);
		}
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}


/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load() is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
			    __func__, dmat, dmat->flags, ENOMEM);
			return (ENOMEM);
		}
	}

	if ((dmat->maxsize <= PAGE_SIZE) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 * XXX Certain AGP hardware does.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	}
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, 0);
	return (0);
}

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same free/contigfree choice that
 * bus_dmamem_alloc made.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if ((dmat->maxsize <= PAGE_SIZE)
	 && dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static __inline int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			pmap_t pmap,
			int flags,
			bus_addr_t *lastaddrp,
			bus_dma_segment_t *segs,
			int *segp,
			int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr;
	bus_addr_t paddr;
	int needbounce = 0;
	int seg;

	if (map == NULL)
		map = &nobounce_dmamap;

	if ((map != &nobounce_dmamap && map->pagesneeded == 0)
	 && ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0)) {
		vm_offset_t	vendaddr;

		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
		    map, &nobounce_dmamap, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (run_filter(dmat, paddr) != 0) {
				needbounce = 1;
				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	vaddr = (vm_offset_t)buf;
	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		if (map->pagesneeded != 0 && run_filter(dmat, curaddr))
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (needbounce == 0 && curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	bus_addr_t		lastaddr = 0;
	int			error, nsegs = 0;

	if (map != NULL) {
		flags |= BUS_DMA_WAITOK;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
	     &lastaddr, dmat->segments, &nsegs, 1);

	if (error == EINPROGRESS) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, error);
		return (error);
	}

	if (error)
		(*callback)(callback_arg, dmat->segments, 0, error);
	else
		(*callback)(callback_arg, dmat->segments, nsegs + 1, 0);

	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error 0 nsegs %d",
	    __func__, dmat, dmat->flags, nsegs + 1);
	return (0);
}
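
/*
 * Example (hypothetical driver code, not part of this file): a minimal
 * load callback that records the bus address of a single-segment mapping;
 * foo_dmamap_cb is illustrative only:
 *
 *	static void
 *	foo_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		if (error != 0)
 *			return;
 *		*(bus_addr_t *)arg = segs[0].ds_addr;
 *	}
 */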


/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
	int nsegs, error;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
						m->m_data, m->m_len,
						NULL, flags, &lastaddr,
						dmat->segments, &nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
			    nsegs+1, m0->m_pkthdr.len, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);
	return (error);
}

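/*
 * Like bus_dmamap_load_mbuf(), except the caller provides the segment
 * array and receives the segment count directly instead of via a callback.
 */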
int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			int flags)
{
	int error;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	*nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
						m->m_data, m->m_len,
						NULL, flags, &lastaddr,
						segs, nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	/* XXX FIXME: Having to increment nsegs is really annoying */
	++*nsegs;
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, *nsegs);
	return (error);
}

/*
 * Like bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	bus_addr_t lastaddr = 0;
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	pmap_t pmap;

	flags |= BUS_DMA_NOWAIT;
	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = NULL;

	nsegs = 0;
	error = 0;
	first = 1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map,
					addr, minlen, pmap, flags, &lastaddr,
					dmat->segments, &nsegs, first);
			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
			    nsegs+1, uio->uio_resid, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		dmat->bounce_zone->total_bounced++;
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing bounce", __func__, dmat, dmat->flags, op);

		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}
	}
}

static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		 && (dmat->boundary <= bz->boundary)
		 && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = dmat->alignment;
	bz->boundary = dmat->boundary;
	snprintf(bz->zoneid, sizeof(bz->zoneid), "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, sizeof(bz->lowaddrid), "%#jx",
	    (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "boundary", CTLFLAG_RD, &bz->boundary, 0, "");

	return (0);
}

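/*
 * Add up to numpages pages to the tag's bounce zone; each page is
 * contigmalloc'ed below the zone's lowaddr.  Returns the number of
 * pages actually allocated.
 */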
static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 bz->lowaddr,
							 PAGE_SIZE,
							 bz->boundary);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}

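/*
 * Reserve free pages from the map's bounce zone, up to what the map still
 * needs.  Returns the shortfall (0 when the map is fully reserved); with
 * commit == 0, nothing is reserved unless the whole request can be met.
 */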
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

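/*
 * Claim one of the map's reserved bounce pages for the client data at
 * vaddr/size and return the bus address the device should use instead.
 */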
static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL && map != &nobounce_dmamap,
	    ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't have any pages reserved");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

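/*
 * Return a bounce page to its zone.  If a deferred map is waiting and can
 * now be fully reserved, move it to the callback list and schedule
 * busdma_swi() to retry its load.
 */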
static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

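/*
 * Software interrupt handler for deferred dmamap loads: retry each queued
 * map with its tag's lock held, so the driver callback runs locked.
 */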
void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}
