busdma_machdep.c revision 209812
1139825Simp/*-
299657Sbenno * Copyright (c) 1997, 1998 Justin T. Gibbs.
378342Sbenno * All rights reserved.
478342Sbenno *
578342Sbenno * Redistribution and use in source and binary forms, with or without
678342Sbenno * modification, are permitted provided that the following conditions
778342Sbenno * are met:
878342Sbenno * 1. Redistributions of source code must retain the above copyright
999657Sbenno *    notice, this list of conditions, and the following disclaimer,
1099657Sbenno *    without modification, immediately at the beginning of the file.
1199657Sbenno * 2. The name of the author may not be used to endorse or promote products
1299657Sbenno *    derived from this software without specific prior written permission.
1378342Sbenno *
1499657Sbenno * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
1599657Sbenno * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1699657Sbenno * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1799657Sbenno * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
1899657Sbenno * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
1999657Sbenno * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2099657Sbenno * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2199657Sbenno * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2299657Sbenno * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2399657Sbenno * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2499657Sbenno * SUCH DAMAGE.
2578342Sbenno */
2678342Sbenno
27209812Snwhitehorn/*
28209812Snwhitehorn * From amd64/busdma_machdep.c, r204214
29209812Snwhitehorn */
30209812Snwhitehorn
31113038Sobrien#include <sys/cdefs.h>
32113038Sobrien__FBSDID("$FreeBSD: head/sys/powerpc/powerpc/busdma_machdep.c 209812 2010-07-08 15:38:55Z nwhitehorn $");
3378342Sbenno
3499657Sbenno#include <sys/param.h>
3599657Sbenno#include <sys/systm.h>
3699657Sbenno#include <sys/malloc.h>
3799657Sbenno#include <sys/bus.h>
3899657Sbenno#include <sys/interrupt.h>
39209812Snwhitehorn#include <sys/kernel.h>
40209812Snwhitehorn#include <sys/ktr.h>
4199657Sbenno#include <sys/lock.h>
4299657Sbenno#include <sys/proc.h>
4399657Sbenno#include <sys/mutex.h>
44108939Sgrehan#include <sys/mbuf.h>
45108939Sgrehan#include <sys/uio.h>
46209812Snwhitehorn#include <sys/sysctl.h>
4799657Sbenno
4899657Sbenno#include <vm/vm.h>
4999657Sbenno#include <vm/vm_page.h>
50108939Sgrehan#include <vm/vm_map.h>
5199657Sbenno
52112436Smux#include <machine/atomic.h>
5399657Sbenno#include <machine/bus.h>
54209812Snwhitehorn#include <machine/md_var.h>
5599657Sbenno
56209812Snwhitehorn#define MAX_BPAGES 8192
57209812Snwhitehorn
58209812Snwhitehornstruct bounce_zone;
59209812Snwhitehorn
/*
 * A DMA tag describes the constraints a device places on DMA transfers
 * (addressable range, alignment, boundary, segment limits).  Tags form a
 * parent chain; a child inherits and tightens its parent's restrictions
 * (see bus_dma_tag_create()).
 */
6099657Sbennostruct bus_dma_tag {
61209812Snwhitehorn	bus_dma_tag_t	  parent;	/* chain of inherited restrictions */
62209812Snwhitehorn	bus_size_t	  alignment;	/* alignment for segments */
63209812Snwhitehorn	bus_size_t	  boundary;	/* boundary segments must not cross */
64209812Snwhitehorn	bus_addr_t	  lowaddr;	/* exclusion window low bound */
65209812Snwhitehorn	bus_addr_t	  highaddr;	/* exclusion window high bound */
6699657Sbenno	bus_dma_filter_t *filter;	/* optional addr-acceptance callback */
67209812Snwhitehorn	void		 *filterarg;
68209812Snwhitehorn	bus_size_t	  maxsize;	/* largest mapping this tag allows */
69209812Snwhitehorn	u_int		  nsegments;	/* max S/G segments per mapping */
70209812Snwhitehorn	bus_size_t	  maxsegsz;	/* largest single segment */
71209812Snwhitehorn	int		  flags;	/* incl. BUS_DMA_COULD_BOUNCE etc. */
72209812Snwhitehorn	int		  ref_count;	/* self + children (atomic updates) */
73209812Snwhitehorn	int		  map_count;	/* outstanding maps; blocks destroy */
74117126Sscottl	bus_dma_lock_t	 *lockfunc;	/* driver lock for deferred callbacks */
75117126Sscottl	void		 *lockfuncarg;
76209812Snwhitehorn	bus_dma_segment_t *segments;	/* lazily allocated segment array */
77209812Snwhitehorn	struct bounce_zone *bounce_zone; /* shared pool of bounce pages */
7899657Sbenno};
7999657Sbenno
/*
 * One page of bounce-buffer memory.  During sync operations data is copied
 * between the client buffer (datavaddr) and the bounce page (vaddr/busaddr).
 */
80209812Snwhitehornstruct bounce_page {
81209812Snwhitehorn	vm_offset_t	vaddr;		/* kva of bounce buffer */
82209812Snwhitehorn	bus_addr_t	busaddr;	/* Physical address */
83209812Snwhitehorn	vm_offset_t	datavaddr;	/* kva of client data */
84209812Snwhitehorn	bus_size_t	datacount;	/* client data count */
85209812Snwhitehorn	STAILQ_ENTRY(bounce_page) links;
86209812Snwhitehorn};
87209812Snwhitehorn
/* Set when deferred-callback processing is pending for the busdma SWI. */
88209812Snwhitehornint busdma_swi_pending;
89209812Snwhitehorn
/*
 * A bounce zone groups bounce pages shared by tags with compatible
 * lowaddr/alignment constraints, together with accounting counters that
 * are exported via sysctl.
 */
90209812Snwhitehornstruct bounce_zone {
91209812Snwhitehorn	STAILQ_ENTRY(bounce_zone) links;
92209812Snwhitehorn	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
93209812Snwhitehorn	int		total_bpages;	/* pages owned by this zone */
94209812Snwhitehorn	int		free_bpages;	/* pages on the free list */
95209812Snwhitehorn	int		reserved_bpages; /* reserved but not yet active */
96209812Snwhitehorn	int		active_bpages;	/* pages currently in a mapping */
97209812Snwhitehorn	int		total_bounced;	/* lifetime bounce-copy count */
98209812Snwhitehorn	int		total_deferred;	/* lifetime deferred-load count */
99209812Snwhitehorn	int		map_count;	/* maps attached to this zone */
100209812Snwhitehorn	bus_size_t	alignment;	/* constraint shared by member tags */
101209812Snwhitehorn	bus_addr_t	lowaddr;	/* constraint shared by member tags */
102209812Snwhitehorn	char		zoneid[8];
103209812Snwhitehorn	char		lowaddrid[20];
104209812Snwhitehorn	struct sysctl_ctx_list sysctl_tree;
105209812Snwhitehorn	struct sysctl_oid *sysctl_tree_top;
106209812Snwhitehorn};
107209812Snwhitehorn
/* bounce_lock protects the zone list, page lists, and the wait queues. */
108209812Snwhitehornstatic struct mtx bounce_lock;
109209812Snwhitehornstatic int total_bpages;
110209812Snwhitehornstatic int busdma_zonecount;
111209812Snwhitehornstatic STAILQ_HEAD(, bounce_zone) bounce_zone_list;
112209812Snwhitehorn
113209812SnwhitehornSYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
114209812SnwhitehornSYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
115209812Snwhitehorn	   "Total bounce pages");
116209812Snwhitehorn
/*
 * A DMA map tracks one in-flight (or pending) mapping: the bounce pages it
 * holds and, when a load had to be deferred for lack of bounce pages, the
 * saved buffer/callback state needed to retry it from busdma_swi.
 */
11799657Sbennostruct bus_dmamap {
118209812Snwhitehorn	struct bp_list	       bpages;	/* bounce pages held by this map */
119209812Snwhitehorn	int		       pagesneeded;
120209812Snwhitehorn	int		       pagesreserved;
121209812Snwhitehorn	bus_dma_tag_t	       dmat;
122209812Snwhitehorn	void		      *buf;		/* unmapped buffer pointer */
123209812Snwhitehorn	bus_size_t	       buflen;		/* unmapped buffer length */
124209812Snwhitehorn	bus_dmamap_callback_t *callback;
125209812Snwhitehorn	void		      *callback_arg;
126209812Snwhitehorn	STAILQ_ENTRY(bus_dmamap) links;
12799657Sbenno};
12899657Sbenno
/* Maps waiting for bounce pages, and maps whose callbacks are pending. */
129209812Snwhitehornstatic STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
130209812Snwhitehornstatic STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
/* Placeholder map used when the caller passed a NULL (no-bounce) map. */
131209812Snwhitehornstatic struct bus_dmamap nobounce_dmamap;
132209812Snwhitehorn
133209812Snwhitehornstatic void init_bounce_pages(void *dummy);
134209812Snwhitehornstatic int alloc_bounce_zone(bus_dma_tag_t dmat);
135209812Snwhitehornstatic int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
136209812Snwhitehornstatic int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
137209812Snwhitehorn				int commit);
138209812Snwhitehornstatic bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
139209812Snwhitehorn				   vm_offset_t vaddr, bus_size_t size);
140209812Snwhitehornstatic void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
141209812Snwhitehornstatic __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
14399657Sbenno/*
144209812Snwhitehorn * Return true if a match is made.
145209812Snwhitehorn *
146209812Snwhitehorn * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
147209812Snwhitehorn *
148209812Snwhitehorn * If paddr is within the bounds of the dma tag then call the filter callback
149209812Snwhitehorn * to check for a match, if there is no filter callback then assume a match.
150209812Snwhitehorn */
151209812Snwhitehornstatic __inline int
152209812Snwhitehornrun_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
153209812Snwhitehorn{
154209812Snwhitehorn	int retval;
155209812Snwhitehorn
156209812Snwhitehorn	retval = 0;
157209812Snwhitehorn
158209812Snwhitehorn	do {
		/*
		 * A page must be bounced if it falls in the tag's
		 * (lowaddr, highaddr] exclusion window or violates the
		 * tag's alignment, unless the tag's filter vetoes it.
		 */
159209812Snwhitehorn		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
160209812Snwhitehorn		 || ((paddr & (dmat->alignment - 1)) != 0))
161209812Snwhitehorn		 && (dmat->filter == NULL
162209812Snwhitehorn		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
163209812Snwhitehorn			retval = 1;
164209812Snwhitehorn
		/* Walk up the parent chain until a match or the root. */
165209812Snwhitehorn		dmat = dmat->parent;
166209812Snwhitehorn	} while (retval == 0 && dmat != NULL);
167209812Snwhitehorn	return (retval);
168209812Snwhitehorn}
169209812Snwhitehorn
170209812Snwhitehorn/*
171117126Sscottl * Convenience function for manipulating driver locks from busdma (during
172117126Sscottl * busdma_swi, for example).  Drivers that don't provide their own locks
173117126Sscottl * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
174117126Sscottl * non-mutex locking scheme don't have to use this at all.
175117126Sscottl */
176117126Sscottlvoid
177117126Sscottlbusdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
178117126Sscottl{
179117126Sscottl	struct mtx *dmtx;
180117126Sscottl
	/* 'arg' is the driver's mutex, as passed via lockfuncarg. */
181117126Sscottl	dmtx = (struct mtx *)arg;
182117126Sscottl	switch (op) {
183117126Sscottl	case BUS_DMA_LOCK:
184117126Sscottl		mtx_lock(dmtx);
185117126Sscottl		break;
186117126Sscottl	case BUS_DMA_UNLOCK:
187117126Sscottl		mtx_unlock(dmtx);
188117126Sscottl		break;
189117126Sscottl	default:
		/* Unknown ops indicate a caller bug; fail loudly. */
190117126Sscottl		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
191117126Sscottl	}
192117126Sscottl}
193117126Sscottl
194117126Sscottl/*
195117126Sscottl * dflt_lock should never get called.  It gets put into the dma tag when
196117126Sscottl * lockfunc == NULL, which is only valid if the maps that are associated
197117126Sscottl * with the tag are meant to never be deferred.
198117126Sscottl * XXX Should have a way to identify which driver is responsible here.
199117126Sscottl */
200117126Sscottlstatic void
201117126Sscottldflt_lock(void *arg, bus_dma_lock_op_t op)
202117126Sscottl{
	/* Reaching this means a load was deferred on a tag without a lockfunc. */
203117126Sscottl	panic("driver error: busdma dflt_lock called");
204117126Sscottl}
205117126Sscottl
206209812Snwhitehorn#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
207209812Snwhitehorn#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4
208117126Sscottl/*
20999657Sbenno * Allocate a device specific dma_tag.
 *
 * On success *dmat holds the new tag and 0 is returned; on failure *dmat is
 * NULL and EINVAL/ENOMEM (or a bounce-zone error) is returned.  The tag
 * inherits and tightens any restrictions imposed by 'parent'.
21099657Sbenno */
21199657Sbennoint
21299657Sbennobus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
213209812Snwhitehorn		   bus_size_t boundary, bus_addr_t lowaddr,
214209812Snwhitehorn		   bus_addr_t highaddr, bus_dma_filter_t *filter,
215209812Snwhitehorn		   void *filterarg, bus_size_t maxsize, int nsegments,
216209812Snwhitehorn		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
217209812Snwhitehorn		   void *lockfuncarg, bus_dma_tag_t *dmat)
21899657Sbenno{
21999657Sbenno	bus_dma_tag_t newtag;
22099657Sbenno	int error = 0;
22199657Sbenno
222209812Snwhitehorn	/* Basic sanity checking */
223209812Snwhitehorn	if (boundary != 0 && boundary < maxsegsz)
224209812Snwhitehorn		maxsegsz = boundary;
225209812Snwhitehorn
226209812Snwhitehorn	if (maxsegsz == 0) {
227209812Snwhitehorn		return (EINVAL);
228209812Snwhitehorn	}
229209812Snwhitehorn
23099657Sbenno	/* Return a NULL tag on failure */
23199657Sbenno	*dmat = NULL;
23299657Sbenno
233209812Snwhitehorn	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
234209812Snwhitehorn	    M_ZERO | M_NOWAIT);
235209812Snwhitehorn	if (newtag == NULL) {
236209812Snwhitehorn		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
237209812Snwhitehorn		    __func__, newtag, 0, error);
23899657Sbenno		return (ENOMEM);
239209812Snwhitehorn	}
24099657Sbenno
24199657Sbenno	newtag->parent = parent;
24299657Sbenno	newtag->alignment = alignment;
24399657Sbenno	newtag->boundary = boundary;
	/* Round the exclusion window bounds out to page granularity. */
244209812Snwhitehorn	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
245209812Snwhitehorn	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1);
24699657Sbenno	newtag->filter = filter;
24799657Sbenno	newtag->filterarg = filterarg;
248209812Snwhitehorn	newtag->maxsize = maxsize;
249209812Snwhitehorn	newtag->nsegments = nsegments;
25099657Sbenno	newtag->maxsegsz = maxsegsz;
25199657Sbenno	newtag->flags = flags;
25299657Sbenno	newtag->ref_count = 1; /* Count ourself */
25399657Sbenno	newtag->map_count = 0;
254117126Sscottl	if (lockfunc != NULL) {
255117126Sscottl		newtag->lockfunc = lockfunc;
256117126Sscottl		newtag->lockfuncarg = lockfuncarg;
257117126Sscottl	} else {
		/* No lockfunc: deferred callbacks would be a driver error. */
258117126Sscottl		newtag->lockfunc = dflt_lock;
259117126Sscottl		newtag->lockfuncarg = NULL;
260117126Sscottl	}
261209812Snwhitehorn	newtag->segments = NULL;
26299657Sbenno
263209812Snwhitehorn	/* Take into account any restrictions imposed by our parent tag */
264209812Snwhitehorn	if (parent != NULL) {
265209812Snwhitehorn		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
266209812Snwhitehorn		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
267134934Sscottl		if (newtag->boundary == 0)
268134934Sscottl			newtag->boundary = parent->boundary;
269134934Sscottl		else if (parent->boundary != 0)
270209812Snwhitehorn			newtag->boundary = MIN(parent->boundary,
271134934Sscottl					       newtag->boundary);
272209812Snwhitehorn		if (newtag->filter == NULL) {
273209812Snwhitehorn			/*
274209812Snwhitehorn			 * Short circuit looking at our parent directly
275209812Snwhitehorn			 * since we have encapsulated all of its information
276209812Snwhitehorn			 */
277209812Snwhitehorn			newtag->filter = parent->filter;
278209812Snwhitehorn			newtag->filterarg = parent->filterarg;
279209812Snwhitehorn			newtag->parent = parent->parent;
28099657Sbenno		}
281112436Smux		if (newtag->parent != NULL)
282112436Smux			atomic_add_int(&parent->ref_count, 1);
28399657Sbenno	}
28499657Sbenno
	/* Bouncing is possible if the tag excludes real memory or is aligned. */
285209812Snwhitehorn	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem)
286209812Snwhitehorn	 || newtag->alignment > 1)
287209812Snwhitehorn		newtag->flags |= BUS_DMA_COULD_BOUNCE;
288209812Snwhitehorn
289209812Snwhitehorn	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
290209812Snwhitehorn	    (flags & BUS_DMA_ALLOCNOW) != 0) {
291209812Snwhitehorn		struct bounce_zone *bz;
292209812Snwhitehorn
293209812Snwhitehorn		/* Must bounce */
294209812Snwhitehorn
295209812Snwhitehorn		if ((error = alloc_bounce_zone(newtag)) != 0) {
296209812Snwhitehorn			free(newtag, M_DEVBUF);
297209812Snwhitehorn			return (error);
298209812Snwhitehorn		}
299209812Snwhitehorn		bz = newtag->bounce_zone;
300209812Snwhitehorn
301209812Snwhitehorn		if (ptoa(bz->total_bpages) < maxsize) {
302209812Snwhitehorn			int pages;
303209812Snwhitehorn
304209812Snwhitehorn			pages = atop(maxsize) - bz->total_bpages;
305209812Snwhitehorn
306209812Snwhitehorn			/* Add pages to our bounce pool */
307209812Snwhitehorn			if (alloc_bounce_pages(newtag, pages) < pages)
308209812Snwhitehorn				error = ENOMEM;
309209812Snwhitehorn		}
310209812Snwhitehorn		/* Performed initial allocation */
311209812Snwhitehorn		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
312209812Snwhitehorn	}
313209812Snwhitehorn
314209812Snwhitehorn	if (error != 0) {
315209812Snwhitehorn		free(newtag, M_DEVBUF);
316209812Snwhitehorn	} else {
317209812Snwhitehorn		*dmat = newtag;
318209812Snwhitehorn	}
319209812Snwhitehorn	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
320209812Snwhitehorn	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
32199657Sbenno	return (error);
32299657Sbenno}
32399657Sbenno
/*
 * Release a reference on a dma_tag.  Fails with EBUSY if the tag still has
 * outstanding maps.  When the last reference drops, the tag is freed and the
 * release cascades up the parent chain.
 */
32499657Sbennoint
32599657Sbennobus_dma_tag_destroy(bus_dma_tag_t dmat)
32699657Sbenno{
327209812Snwhitehorn	bus_dma_tag_t dmat_copy;
328209812Snwhitehorn	int error;
329209812Snwhitehorn
330209812Snwhitehorn	error = 0;
	/* Keep the original pointer for the trace message below. */
331209812Snwhitehorn	dmat_copy = dmat;
332209812Snwhitehorn
33399657Sbenno	if (dmat != NULL) {
334209812Snwhitehorn
335209812Snwhitehorn		if (dmat->map_count != 0) {
336209812Snwhitehorn			error = EBUSY;
337209812Snwhitehorn			goto out;
338209812Snwhitehorn		}
339209812Snwhitehorn
340209812Snwhitehorn		while (dmat != NULL) {
341209812Snwhitehorn			bus_dma_tag_t parent;
342209812Snwhitehorn
343209812Snwhitehorn			parent = dmat->parent;
344209812Snwhitehorn			atomic_subtract_int(&dmat->ref_count, 1);
345209812Snwhitehorn			if (dmat->ref_count == 0) {
346209812Snwhitehorn				if (dmat->segments != NULL)
347209812Snwhitehorn					free(dmat->segments, M_DEVBUF);
348209812Snwhitehorn				free(dmat, M_DEVBUF);
349209812Snwhitehorn				/*
350209812Snwhitehorn				 * Last reference count, so
351209812Snwhitehorn				 * release our reference
352209812Snwhitehorn				 * count on our parent.
353209812Snwhitehorn				 */
354209812Snwhitehorn				dmat = parent;
355209812Snwhitehorn			} else
356209812Snwhitehorn				dmat = NULL;
357209812Snwhitehorn		}
358209812Snwhitehorn	}
359209812Snwhitehornout:
360209812Snwhitehorn	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
361209812Snwhitehorn	return (error);
36299657Sbenno}
36399657Sbenno
36499657Sbenno/*
36599657Sbenno * Allocate a handle for mapping from kva/uva/physical
36699657Sbenno * address space into bus device space.
 *
 * Returns 0 with *mapp set (NULL if the tag can never bounce, in which case
 * no per-map state is needed), or ENOMEM/bounce-zone errors.  Also lazily
 * allocates the tag's segment array and pre-grows the bounce pool.
36799657Sbenno */
36899657Sbennoint
36999657Sbennobus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
37099657Sbenno{
371209812Snwhitehorn	int error;
37299657Sbenno
373209812Snwhitehorn	error = 0;
374209812Snwhitehorn
	/* First map on this tag: allocate the shared segment array. */
375209812Snwhitehorn	if (dmat->segments == NULL) {
376209812Snwhitehorn		dmat->segments = (bus_dma_segment_t *)malloc(
377209812Snwhitehorn		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
378209812Snwhitehorn		    M_NOWAIT);
379209812Snwhitehorn		if (dmat->segments == NULL) {
380209812Snwhitehorn			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
381209812Snwhitehorn			    __func__, dmat, ENOMEM);
382209812Snwhitehorn			return (ENOMEM);
383209812Snwhitehorn		}
384209812Snwhitehorn	}
385209812Snwhitehorn
386209812Snwhitehorn	/*
387209812Snwhitehorn	 * Bouncing might be required if the driver asks for an active
388209812Snwhitehorn	 * exclusion region, a data alignment that is stricter than 1, and/or
389209812Snwhitehorn	 * an active address boundary.
390209812Snwhitehorn	 */
391209812Snwhitehorn	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
392209812Snwhitehorn
393209812Snwhitehorn		/* Must bounce */
394209812Snwhitehorn		struct bounce_zone *bz;
395209812Snwhitehorn		int maxpages;
396209812Snwhitehorn
397209812Snwhitehorn		if (dmat->bounce_zone == NULL) {
398209812Snwhitehorn			if ((error = alloc_bounce_zone(dmat)) != 0)
399209812Snwhitehorn				return (error);
400209812Snwhitehorn		}
401209812Snwhitehorn		bz = dmat->bounce_zone;
402209812Snwhitehorn
403209812Snwhitehorn		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
404209812Snwhitehorn					     M_NOWAIT | M_ZERO);
405209812Snwhitehorn		if (*mapp == NULL) {
406209812Snwhitehorn			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
407209812Snwhitehorn			    __func__, dmat, ENOMEM);
408209812Snwhitehorn			return (ENOMEM);
409209812Snwhitehorn		}
410209812Snwhitehorn
411209812Snwhitehorn		/* Initialize the new map */
412209812Snwhitehorn		STAILQ_INIT(&((*mapp)->bpages));
413209812Snwhitehorn
414209812Snwhitehorn		/*
415209812Snwhitehorn		 * Attempt to add pages to our pool on a per-instance
416209812Snwhitehorn		 * basis up to a sane limit.
417209812Snwhitehorn		 */
418209812Snwhitehorn		if (dmat->alignment > 1)
419209812Snwhitehorn			maxpages = MAX_BPAGES;
420209812Snwhitehorn		else
421209812Snwhitehorn			maxpages = MIN(MAX_BPAGES, Maxmem -atop(dmat->lowaddr));
422209812Snwhitehorn		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
423209812Snwhitehorn		 || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
424209812Snwhitehorn			int pages;
425209812Snwhitehorn
426209812Snwhitehorn			pages = MAX(atop(dmat->maxsize), 1);
427209812Snwhitehorn			pages = MIN(maxpages - bz->total_bpages, pages);
428209812Snwhitehorn			pages = MAX(pages, 1);
429209812Snwhitehorn			if (alloc_bounce_pages(dmat, pages) < pages)
430209812Snwhitehorn				error = ENOMEM;
431209812Snwhitehorn
			/* Only the very first allocation is allowed to fail. */
432209812Snwhitehorn			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
433209812Snwhitehorn				if (error == 0)
434209812Snwhitehorn					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
435209812Snwhitehorn			} else {
436209812Snwhitehorn				error = 0;
437209812Snwhitehorn			}
438209812Snwhitehorn		}
439209812Snwhitehorn		bz->map_count++;
440209812Snwhitehorn	} else {
		/* NULL map means "no bouncing needed" to the load path. */
441209812Snwhitehorn		*mapp = NULL;
442209812Snwhitehorn	}
443209812Snwhitehorn	if (error == 0)
444209812Snwhitehorn		dmat->map_count++;
445209812Snwhitehorn	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
446209812Snwhitehorn	    __func__, dmat, dmat->flags, error);
447209812Snwhitehorn	return (error);
44899657Sbenno}
44999657Sbenno
45099657Sbenno/*
45199657Sbenno * Destroy a handle for mapping from kva/uva/physical
45299657Sbenno * address space into bus device space.
 *
 * Returns EBUSY if the map still holds bounce pages (i.e. it is loaded),
 * otherwise 0.
45399657Sbenno */
45499657Sbennoint
45599657Sbennobus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
45699657Sbenno{
	/* A NULL or nobounce map has no allocated state to release. */
457209812Snwhitehorn	if (map != NULL && map != &nobounce_dmamap) {
458209812Snwhitehorn		if (STAILQ_FIRST(&map->bpages) != NULL) {
459209812Snwhitehorn			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
460209812Snwhitehorn			    __func__, dmat, EBUSY);
461209812Snwhitehorn			return (EBUSY);
462209812Snwhitehorn		}
463209812Snwhitehorn		if (dmat->bounce_zone)
464209812Snwhitehorn			dmat->bounce_zone->map_count--;
465209812Snwhitehorn		free(map, M_DEVBUF);
466209812Snwhitehorn	}
467209812Snwhitehorn	dmat->map_count--;
468209812Snwhitehorn	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
469209812Snwhitehorn	return (0);
47099657Sbenno}
47199657Sbenno
472209812Snwhitehorn
47399657Sbenno/*
47499657Sbenno * Allocate a piece of memory that can be efficiently mapped into
47599657Sbenno * bus device space based on the constraints listed in the dma tag.
47699657Sbenno * A dmamap for use with dmamap_load is also allocated.
 *
 * *mapp is always set to NULL: memory allocated here already satisfies the
 * tag's constraints and never needs bouncing.  Returns 0 or ENOMEM.
47799657Sbenno */
47899657Sbennoint
47999657Sbennobus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
480209812Snwhitehorn		 bus_dmamap_t *mapp)
48199657Sbenno{
482118081Smux	int mflags;
483118081Smux
484118081Smux	if (flags & BUS_DMA_NOWAIT)
485118081Smux		mflags = M_NOWAIT;
486118081Smux	else
487118081Smux		mflags = M_WAITOK;
488209812Snwhitehorn
489209812Snwhitehorn	/* If we succeed, no mapping/bouncing will be required */
490209812Snwhitehorn	*mapp = NULL;
491209812Snwhitehorn
492209812Snwhitehorn	if (dmat->segments == NULL) {
493209812Snwhitehorn		dmat->segments = (bus_dma_segment_t *)malloc(
494209812Snwhitehorn		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
495209812Snwhitehorn		    mflags);
496209812Snwhitehorn		if (dmat->segments == NULL) {
497209812Snwhitehorn			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
498209812Snwhitehorn			    __func__, dmat, dmat->flags, ENOMEM);
499209812Snwhitehorn			return (ENOMEM);
500209812Snwhitehorn		}
501209812Snwhitehorn	}
502118081Smux	if (flags & BUS_DMA_ZERO)
503118081Smux		mflags |= M_ZERO;
504118081Smux
505170421Smarcel	/*
506170421Smarcel	 * XXX:
507170421Smarcel	 * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact
508170421Smarcel	 * alignment guarantees of malloc need to be nailed down, and the
509170421Smarcel	 * code below should be rewritten to take that into account.
510170421Smarcel	 *
511209812Snwhitehorn	 * In the meantime, we'll warn the user if malloc gets it wrong.
512170421Smarcel	 */
	/* Small, unconstrained allocations can use plain malloc... */
513209812Snwhitehorn	if ((dmat->maxsize <= PAGE_SIZE) &&
514209812Snwhitehorn	   (dmat->alignment < dmat->maxsize) &&
515209812Snwhitehorn	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
516170421Smarcel		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
517209812Snwhitehorn	} else {
518209812Snwhitehorn		/*
519209812Snwhitehorn		 * XXX Use Contigmalloc until it is merged into this facility
520209812Snwhitehorn		 *     and handles multi-seg allocations.  Nobody is doing
521209812Snwhitehorn		 *     multi-seg allocations yet though.
522209812Snwhitehorn		 * XXX Certain AGP hardware does.
523209812Snwhitehorn		 */
524209812Snwhitehorn		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
525209812Snwhitehorn		    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
526209812Snwhitehorn		    dmat->boundary);
527209812Snwhitehorn	}
528209812Snwhitehorn	if (*vaddr == NULL) {
529209812Snwhitehorn		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
530209812Snwhitehorn		    __func__, dmat, dmat->flags, ENOMEM);
531209812Snwhitehorn		return (ENOMEM);
532209812Snwhitehorn	} else if ((uintptr_t)*vaddr & (dmat->alignment - 1)) {
533209812Snwhitehorn		printf("bus_dmamem_alloc failed to align memory properly.\n");
534209812Snwhitehorn	}
535209812Snwhitehorn#ifdef NOTYET
536209812Snwhitehorn	if (flags & BUS_DMA_NOCACHE)
537209812Snwhitehorn		pmap_change_attr((vm_offset_t)*vaddr, dmat->maxsize,
538209812Snwhitehorn		    PAT_UNCACHEABLE);
539209812Snwhitehorn#endif
540209812Snwhitehorn	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
541209812Snwhitehorn	    __func__, dmat, dmat->flags, 0);
542209812Snwhitehorn	return (0);
54399657Sbenno}
54499657Sbenno
54599657Sbenno/*
546209812Snwhitehorn * Free a piece of memory and its associated dmamap, that was allocated
54799657Sbenno * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
54899657Sbenno */
54978342Sbennovoid
55099657Sbennobus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
55178342Sbenno{
552209812Snwhitehorn	/*
553209812Snwhitehorn	 * dmamem does not need to be bounced, so the map should be
554209812Snwhitehorn	 * NULL
555209812Snwhitehorn	 */
556209812Snwhitehorn	if (map != NULL)
557209812Snwhitehorn		panic("bus_dmamem_free: Invalid map freed\n");
558209812Snwhitehorn#ifdef NOTYET
559209812Snwhitehorn	pmap_change_attr((vm_offset_t)vaddr, dmat->maxsize, PAT_WRITE_BACK);
560209812Snwhitehorn#endif
	/* Mirror the malloc/contigmalloc decision made in bus_dmamem_alloc. */
561209812Snwhitehorn	if ((dmat->maxsize <= PAGE_SIZE) &&
562209812Snwhitehorn	   (dmat->alignment < dmat->maxsize) &&
563209812Snwhitehorn	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
56499657Sbenno		free(vaddr, M_DEVBUF);
565209812Snwhitehorn	else {
56699657Sbenno		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
567209812Snwhitehorn	}
568209812Snwhitehorn	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
56999657Sbenno}
57078342Sbenno
57199657Sbenno/*
572108939Sgrehan * Utility function to load a linear buffer.  lastaddrp holds state
573108939Sgrehan * between invocations (for multiple-buffer loads).  segp contains
574108939Sgrehan * the starting segment on entrance, and the ending segment on exit.
575108939Sgrehan * first indicates if this is the first invocation of this function.
 *
 * Returns 0 on success, EFBIG if the buffer did not fit in nsegments,
 * ENOMEM if bounce pages could not be reserved (BUS_DMA_NOWAIT), or
 * EINPROGRESS if the load was queued for deferred completion.
57699657Sbenno */
577209812Snwhitehornstatic __inline int
578209812Snwhitehorn_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
579209812Snwhitehorn    			bus_dmamap_t map,
580209812Snwhitehorn			void *buf, bus_size_t buflen,
581209812Snwhitehorn			pmap_t pmap,
582209812Snwhitehorn			int flags,
583209812Snwhitehorn			bus_addr_t *lastaddrp,
584209812Snwhitehorn			bus_dma_segment_t *segs,
585209812Snwhitehorn			int *segp,
586209812Snwhitehorn			int first)
587108939Sgrehan{
588108939Sgrehan	bus_size_t sgsize;
589108939Sgrehan	bus_addr_t curaddr, lastaddr, baddr, bmask;
590209812Snwhitehorn	vm_offset_t vaddr;
591209812Snwhitehorn	bus_addr_t paddr;
592108939Sgrehan	int seg;
593108939Sgrehan
	/* Callers with no-bounce tags pass NULL; substitute the dummy map. */
594209812Snwhitehorn	if (map == NULL)
595209812Snwhitehorn		map = &nobounce_dmamap;
596108939Sgrehan
597209812Snwhitehorn	if ((map != &nobounce_dmamap && map->pagesneeded == 0)
598209812Snwhitehorn	 && ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0)) {
599209812Snwhitehorn		vm_offset_t	vendaddr;
600209812Snwhitehorn
601209812Snwhitehorn		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
602209812Snwhitehorn		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
603209812Snwhitehorn		    dmat->boundary, dmat->alignment);
604209812Snwhitehorn		CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
605209812Snwhitehorn		    map, &nobounce_dmamap, map->pagesneeded);
606209812Snwhitehorn		/*
607209812Snwhitehorn		 * Count the number of bounce pages
608209812Snwhitehorn		 * needed in order to complete this transfer
609209812Snwhitehorn		 */
610209812Snwhitehorn		vaddr = (vm_offset_t)buf;
611209812Snwhitehorn		vendaddr = (vm_offset_t)buf + buflen;
612209812Snwhitehorn
613209812Snwhitehorn		while (vaddr < vendaddr) {
614209812Snwhitehorn			bus_size_t sg_len;
615209812Snwhitehorn
616209812Snwhitehorn			sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK);
			/* pmap == NULL means a kernel virtual address. */
617209812Snwhitehorn			if (pmap)
618209812Snwhitehorn				paddr = pmap_extract(pmap, vaddr);
619209812Snwhitehorn			else
620209812Snwhitehorn				paddr = pmap_kextract(vaddr);
621209812Snwhitehorn			if (run_filter(dmat, paddr) != 0) {
622209812Snwhitehorn				sg_len = roundup2(sg_len, dmat->alignment);
623209812Snwhitehorn				map->pagesneeded++;
624209812Snwhitehorn			}
625209812Snwhitehorn			vaddr += sg_len;
626209812Snwhitehorn		}
627209812Snwhitehorn		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
628209812Snwhitehorn	}
629209812Snwhitehorn
630209812Snwhitehorn	/* Reserve Necessary Bounce Pages */
631209812Snwhitehorn	if (map->pagesneeded != 0) {
632209812Snwhitehorn		mtx_lock(&bounce_lock);
633209812Snwhitehorn		if (flags & BUS_DMA_NOWAIT) {
634209812Snwhitehorn			if (reserve_bounce_pages(dmat, map, 0) != 0) {
635209812Snwhitehorn				mtx_unlock(&bounce_lock);
636209812Snwhitehorn				return (ENOMEM);
637209812Snwhitehorn			}
638209812Snwhitehorn		} else {
639209812Snwhitehorn			if (reserve_bounce_pages(dmat, map, 1) != 0) {
640209812Snwhitehorn				/* Queue us for resources */
641209812Snwhitehorn				map->dmat = dmat;
642209812Snwhitehorn				map->buf = buf;
643209812Snwhitehorn				map->buflen = buflen;
644209812Snwhitehorn				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
645209812Snwhitehorn				    map, links);
646209812Snwhitehorn				mtx_unlock(&bounce_lock);
				/* The load will complete later via busdma_swi. */
647209812Snwhitehorn				return (EINPROGRESS);
648209812Snwhitehorn			}
649209812Snwhitehorn		}
650209812Snwhitehorn		mtx_unlock(&bounce_lock);
651209812Snwhitehorn	}
652209812Snwhitehorn
653209812Snwhitehorn	vaddr = (vm_offset_t)buf;
654108939Sgrehan	lastaddr = *lastaddrp;
655108939Sgrehan	bmask = ~(dmat->boundary - 1);
656108939Sgrehan
657108939Sgrehan	for (seg = *segp; buflen > 0 ; ) {
658209812Snwhitehorn		bus_size_t max_sgsize;
659209812Snwhitehorn
660108939Sgrehan		/*
661108939Sgrehan		 * Get the physical address for this segment.
662108939Sgrehan		 */
663108939Sgrehan		if (pmap)
664108939Sgrehan			curaddr = pmap_extract(pmap, vaddr);
665108939Sgrehan		else
666108939Sgrehan			curaddr = pmap_kextract(vaddr);
667108939Sgrehan
668108939Sgrehan		/*
669108939Sgrehan		 * Compute the segment size, and adjust counts.
670108939Sgrehan		 */
671209812Snwhitehorn		max_sgsize = MIN(buflen, dmat->maxsegsz);
672209812Snwhitehorn		sgsize = PAGE_SIZE - ((vm_offset_t)curaddr & PAGE_MASK);
		/* Substitute a bounce page if this address must be bounced. */
673209812Snwhitehorn		if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
674209812Snwhitehorn			sgsize = roundup2(sgsize, dmat->alignment);
675209812Snwhitehorn			sgsize = MIN(sgsize, max_sgsize);
676209812Snwhitehorn			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);
677209812Snwhitehorn		} else {
678209812Snwhitehorn			sgsize = MIN(sgsize, max_sgsize);
679209812Snwhitehorn		}
680108939Sgrehan
681108939Sgrehan		/*
682108939Sgrehan		 * Make sure we don't cross any boundaries.
683108939Sgrehan		 */
684108939Sgrehan		if (dmat->boundary > 0) {
685108939Sgrehan			baddr = (curaddr + dmat->boundary) & bmask;
686108939Sgrehan			if (sgsize > (baddr - curaddr))
687108939Sgrehan				sgsize = (baddr - curaddr);
688108939Sgrehan		}
689108939Sgrehan
690108939Sgrehan		/*
691108939Sgrehan		 * Insert chunk into a segment, coalescing with
692209812Snwhitehorn		 * previous segment if possible.
693108939Sgrehan		 */
694108939Sgrehan		if (first) {
695108939Sgrehan			segs[seg].ds_addr = curaddr;
696108939Sgrehan			segs[seg].ds_len = sgsize;
697108939Sgrehan			first = 0;
698108939Sgrehan		} else {
699108939Sgrehan			if (curaddr == lastaddr &&
700108939Sgrehan			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
701108939Sgrehan			    (dmat->boundary == 0 ||
702108939Sgrehan			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
703108939Sgrehan				segs[seg].ds_len += sgsize;
704108939Sgrehan			else {
				/* Out of segments: report EFBIG below. */
705108939Sgrehan				if (++seg >= dmat->nsegments)
706108939Sgrehan					break;
707108939Sgrehan				segs[seg].ds_addr = curaddr;
708108939Sgrehan				segs[seg].ds_len = sgsize;
709108939Sgrehan			}
710108939Sgrehan		}
711108939Sgrehan
712108939Sgrehan		lastaddr = curaddr + sgsize;
713108939Sgrehan		vaddr += sgsize;
714108939Sgrehan		buflen -= sgsize;
715108939Sgrehan	}
716108939Sgrehan
717108939Sgrehan	*segp = seg;
718108939Sgrehan	*lastaddrp = lastaddr;
719108939Sgrehan
720108939Sgrehan	/*
721108939Sgrehan	 * Did we fit?
722108939Sgrehan	 */
723108939Sgrehan	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
724108939Sgrehan}
725108939Sgrehan
726108939Sgrehan/*
727170979Syongari * Map the buffer buf into bus space using the dmamap map.
728170979Syongari */
729170979Syongariint
730170979Syongaribus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
731209812Snwhitehorn		bus_size_t buflen, bus_dmamap_callback_t *callback,
732209812Snwhitehorn		void *callback_arg, int flags)
733170979Syongari{
734209812Snwhitehorn	bus_addr_t		lastaddr = 0;
735209812Snwhitehorn	int			error, nsegs = 0;
736170979Syongari
737209812Snwhitehorn	if (map != NULL) {
738209812Snwhitehorn		flags |= BUS_DMA_WAITOK;
739209812Snwhitehorn		map->callback = callback;
740209812Snwhitehorn		map->callback_arg = callback_arg;
741209812Snwhitehorn	}
742170979Syongari
743209812Snwhitehorn	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
744209812Snwhitehorn	     &lastaddr, dmat->segments, &nsegs, 1);
745170979Syongari
746209812Snwhitehorn	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
747209812Snwhitehorn	    __func__, dmat, dmat->flags, error, nsegs + 1);
748209812Snwhitehorn
749209812Snwhitehorn	if (error == EINPROGRESS) {
750209812Snwhitehorn		return (error);
751209812Snwhitehorn	}
752209812Snwhitehorn
753209812Snwhitehorn	if (error)
754209812Snwhitehorn		(*callback)(callback_arg, dmat->segments, 0, error);
755170979Syongari	else
756209812Snwhitehorn		(*callback)(callback_arg, dmat->segments, nsegs + 1, 0);
757170979Syongari
758209812Snwhitehorn	/*
759209812Snwhitehorn	 * Return ENOMEM to the caller so that it can pass it up the stack.
760209812Snwhitehorn	 * This error only happens when NOWAIT is set, so deferal is disabled.
761209812Snwhitehorn	 */
762209812Snwhitehorn	if (error == ENOMEM)
763209812Snwhitehorn		return (error);
764209812Snwhitehorn
765170979Syongari	return (0);
766170979Syongari}
767170979Syongari
768209812Snwhitehorn
769170979Syongari/*
770209812Snwhitehorn * Like _bus_dmamap_load(), but for mbufs.
771108939Sgrehan */
772108939Sgrehanint
773209812Snwhitehornbus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
774209812Snwhitehorn		     struct mbuf *m0,
775209812Snwhitehorn		     bus_dmamap_callback2_t *callback, void *callback_arg,
776209812Snwhitehorn		     int flags)
777108939Sgrehan{
778209812Snwhitehorn	int nsegs, error;
779108939Sgrehan
780113255Sdes	M_ASSERTPKTHDR(m0);
781108939Sgrehan
782209812Snwhitehorn	flags |= BUS_DMA_NOWAIT;
783209812Snwhitehorn	nsegs = 0;
784209812Snwhitehorn	error = 0;
785108939Sgrehan	if (m0->m_pkthdr.len <= dmat->maxsize) {
786108939Sgrehan		int first = 1;
787209812Snwhitehorn		bus_addr_t lastaddr = 0;
788108939Sgrehan		struct mbuf *m;
789108939Sgrehan
790108939Sgrehan		for (m = m0; m != NULL && error == 0; m = m->m_next) {
791110335Sharti			if (m->m_len > 0) {
792209812Snwhitehorn				error = _bus_dmamap_load_buffer(dmat, map,
793209812Snwhitehorn						m->m_data, m->m_len,
794209812Snwhitehorn						NULL, flags, &lastaddr,
795209812Snwhitehorn						dmat->segments, &nsegs, first);
796110335Sharti				first = 0;
797110335Sharti			}
798108939Sgrehan		}
799108939Sgrehan	} else {
800108939Sgrehan		error = EINVAL;
801108939Sgrehan	}
802108939Sgrehan
803108939Sgrehan	if (error) {
804209812Snwhitehorn		/* force "no valid mappings" in callback */
805209812Snwhitehorn		(*callback)(callback_arg, dmat->segments, 0, 0, error);
806108939Sgrehan	} else {
807209812Snwhitehorn		(*callback)(callback_arg, dmat->segments,
808209812Snwhitehorn			    nsegs+1, m0->m_pkthdr.len, error);
809108939Sgrehan	}
810209812Snwhitehorn	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
811209812Snwhitehorn	    __func__, dmat, dmat->flags, error, nsegs + 1);
812108939Sgrehan	return (error);
813108939Sgrehan}
814108939Sgrehan
815140314Sscottlint
816209812Snwhitehornbus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
817209812Snwhitehorn			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
818209812Snwhitehorn			int flags)
819140314Sscottl{
820209812Snwhitehorn	int error;
821140314Sscottl
822140314Sscottl	M_ASSERTPKTHDR(m0);
823140314Sscottl
824209812Snwhitehorn	flags |= BUS_DMA_NOWAIT;
825147851Sgrehan	*nsegs = 0;
826209812Snwhitehorn	error = 0;
827140314Sscottl	if (m0->m_pkthdr.len <= dmat->maxsize) {
828140314Sscottl		int first = 1;
829209812Snwhitehorn		bus_addr_t lastaddr = 0;
830140314Sscottl		struct mbuf *m;
831140314Sscottl
832140314Sscottl		for (m = m0; m != NULL && error == 0; m = m->m_next) {
833140314Sscottl			if (m->m_len > 0) {
834209812Snwhitehorn				error = _bus_dmamap_load_buffer(dmat, map,
835209812Snwhitehorn						m->m_data, m->m_len,
836209812Snwhitehorn						NULL, flags, &lastaddr,
837209812Snwhitehorn						segs, nsegs, first);
838140314Sscottl				first = 0;
839140314Sscottl			}
840140314Sscottl		}
841140314Sscottl	} else {
842140314Sscottl		error = EINVAL;
843140314Sscottl	}
844140314Sscottl
845209812Snwhitehorn	/* XXX FIXME: Having to increment nsegs is really annoying */
846209812Snwhitehorn	++*nsegs;
847209812Snwhitehorn	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
848209812Snwhitehorn	    __func__, dmat, dmat->flags, error, *nsegs);
849140314Sscottl	return (error);
850140314Sscottl}
851140314Sscottl
852108939Sgrehan/*
853209812Snwhitehorn * Like _bus_dmamap_load(), but for uios.
854108939Sgrehan */
855108939Sgrehanint
856209812Snwhitehornbus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
857209812Snwhitehorn		    struct uio *uio,
858209812Snwhitehorn		    bus_dmamap_callback2_t *callback, void *callback_arg,
859209812Snwhitehorn		    int flags)
860108939Sgrehan{
861209812Snwhitehorn	bus_addr_t lastaddr = 0;
862209812Snwhitehorn	int nsegs, error, first, i;
863108939Sgrehan	bus_size_t resid;
864108939Sgrehan	struct iovec *iov;
865209812Snwhitehorn	pmap_t pmap;
866108939Sgrehan
867209812Snwhitehorn	flags |= BUS_DMA_NOWAIT;
868108939Sgrehan	resid = uio->uio_resid;
869108939Sgrehan	iov = uio->uio_iov;
870108939Sgrehan
871108939Sgrehan	if (uio->uio_segflg == UIO_USERSPACE) {
872209812Snwhitehorn		KASSERT(uio->uio_td != NULL,
873209812Snwhitehorn			("bus_dmamap_load_uio: USERSPACE but no proc"));
874209812Snwhitehorn		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
875209812Snwhitehorn	} else
876209812Snwhitehorn		pmap = NULL;
877108939Sgrehan
878209812Snwhitehorn	nsegs = 0;
879209812Snwhitehorn	error = 0;
880108939Sgrehan	first = 1;
881108939Sgrehan	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
882108939Sgrehan		/*
883108939Sgrehan		 * Now at the first iovec to load.  Load each iovec
884108939Sgrehan		 * until we have exhausted the residual count.
885108939Sgrehan		 */
886108939Sgrehan		bus_size_t minlen =
887209812Snwhitehorn			resid < iov[i].iov_len ? resid : iov[i].iov_len;
888108939Sgrehan		caddr_t addr = (caddr_t) iov[i].iov_base;
889108939Sgrehan
890110335Sharti		if (minlen > 0) {
891209812Snwhitehorn			error = _bus_dmamap_load_buffer(dmat, map,
892209812Snwhitehorn					addr, minlen, pmap, flags, &lastaddr,
893209812Snwhitehorn					dmat->segments, &nsegs, first);
894110335Sharti			first = 0;
895108939Sgrehan
896110335Sharti			resid -= minlen;
897110335Sharti		}
898108939Sgrehan	}
899108939Sgrehan
900108939Sgrehan	if (error) {
901209812Snwhitehorn		/* force "no valid mappings" in callback */
902209812Snwhitehorn		(*callback)(callback_arg, dmat->segments, 0, 0, error);
903108939Sgrehan	} else {
904209812Snwhitehorn		(*callback)(callback_arg, dmat->segments,
905209812Snwhitehorn			    nsegs+1, uio->uio_resid, error);
906108939Sgrehan	}
907209812Snwhitehorn	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
908209812Snwhitehorn	    __func__, dmat, dmat->flags, error, nsegs + 1);
909108939Sgrehan	return (error);
910108939Sgrehan}
911108939Sgrehan
912108939Sgrehan/*
913209812Snwhitehorn * Release the mapping held by map.
914108939Sgrehan */
91599657Sbennovoid
916143634Sgrehan_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
917109935Sbenno{
918209812Snwhitehorn	struct bounce_page *bpage;
91999657Sbenno
920209812Snwhitehorn	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
921209812Snwhitehorn		STAILQ_REMOVE_HEAD(&map->bpages, links);
922209812Snwhitehorn		free_bounce_page(dmat, bpage);
923209812Snwhitehorn	}
924109935Sbenno}
925109935Sbenno
92699657Sbennovoid
927143634Sgrehan_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
928109919Sbenno{
929209812Snwhitehorn	struct bounce_page *bpage;
930109919Sbenno
931209812Snwhitehorn	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
932209812Snwhitehorn		/*
933209812Snwhitehorn		 * Handle data bouncing.  We might also
934209812Snwhitehorn		 * want to add support for invalidating
935209812Snwhitehorn		 * the caches on broken hardware
936209812Snwhitehorn		 */
937209812Snwhitehorn		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
938209812Snwhitehorn		    "performing bounce", __func__, op, dmat, dmat->flags);
939209812Snwhitehorn
940209812Snwhitehorn		if (op & BUS_DMASYNC_PREWRITE) {
941209812Snwhitehorn			while (bpage != NULL) {
942209812Snwhitehorn				bcopy((void *)bpage->datavaddr,
943209812Snwhitehorn				      (void *)bpage->vaddr,
944209812Snwhitehorn				      bpage->datacount);
945209812Snwhitehorn				bpage = STAILQ_NEXT(bpage, links);
946209812Snwhitehorn			}
947209812Snwhitehorn			dmat->bounce_zone->total_bounced++;
948209812Snwhitehorn		}
949209812Snwhitehorn
950209812Snwhitehorn		if (op & BUS_DMASYNC_POSTREAD) {
951209812Snwhitehorn			while (bpage != NULL) {
952209812Snwhitehorn				bcopy((void *)bpage->vaddr,
953209812Snwhitehorn				      (void *)bpage->datavaddr,
954209812Snwhitehorn				      bpage->datacount);
955209812Snwhitehorn				bpage = STAILQ_NEXT(bpage, links);
956209812Snwhitehorn			}
957209812Snwhitehorn			dmat->bounce_zone->total_bounced++;
958209812Snwhitehorn		}
959209812Snwhitehorn	}
960109919Sbenno}
961209812Snwhitehorn
962209812Snwhitehornstatic void
963209812Snwhitehorninit_bounce_pages(void *dummy __unused)
964209812Snwhitehorn{
965209812Snwhitehorn
966209812Snwhitehorn	total_bpages = 0;
967209812Snwhitehorn	STAILQ_INIT(&bounce_zone_list);
968209812Snwhitehorn	STAILQ_INIT(&bounce_map_waitinglist);
969209812Snwhitehorn	STAILQ_INIT(&bounce_map_callbacklist);
970209812Snwhitehorn	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
971209812Snwhitehorn}
972209812SnwhitehornSYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);
973209812Snwhitehorn
974209812Snwhitehornstatic struct sysctl_ctx_list *
975209812Snwhitehornbusdma_sysctl_tree(struct bounce_zone *bz)
976209812Snwhitehorn{
977209812Snwhitehorn	return (&bz->sysctl_tree);
978209812Snwhitehorn}
979209812Snwhitehorn
980209812Snwhitehornstatic struct sysctl_oid *
981209812Snwhitehornbusdma_sysctl_tree_top(struct bounce_zone *bz)
982209812Snwhitehorn{
983209812Snwhitehorn	return (bz->sysctl_tree_top);
984209812Snwhitehorn}
985209812Snwhitehorn
986209812Snwhitehornstatic int
987209812Snwhitehornalloc_bounce_zone(bus_dma_tag_t dmat)
988209812Snwhitehorn{
989209812Snwhitehorn	struct bounce_zone *bz;
990209812Snwhitehorn
991209812Snwhitehorn	/* Check to see if we already have a suitable zone */
992209812Snwhitehorn	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
993209812Snwhitehorn		if ((dmat->alignment <= bz->alignment)
994209812Snwhitehorn		 && (dmat->lowaddr >= bz->lowaddr)) {
995209812Snwhitehorn			dmat->bounce_zone = bz;
996209812Snwhitehorn			return (0);
997209812Snwhitehorn		}
998209812Snwhitehorn	}
999209812Snwhitehorn
1000209812Snwhitehorn	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
1001209812Snwhitehorn	    M_NOWAIT | M_ZERO)) == NULL)
1002209812Snwhitehorn		return (ENOMEM);
1003209812Snwhitehorn
1004209812Snwhitehorn	STAILQ_INIT(&bz->bounce_page_list);
1005209812Snwhitehorn	bz->free_bpages = 0;
1006209812Snwhitehorn	bz->reserved_bpages = 0;
1007209812Snwhitehorn	bz->active_bpages = 0;
1008209812Snwhitehorn	bz->lowaddr = dmat->lowaddr;
1009209812Snwhitehorn	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
1010209812Snwhitehorn	bz->map_count = 0;
1011209812Snwhitehorn	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
1012209812Snwhitehorn	busdma_zonecount++;
1013209812Snwhitehorn	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
1014209812Snwhitehorn	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
1015209812Snwhitehorn	dmat->bounce_zone = bz;
1016209812Snwhitehorn
1017209812Snwhitehorn	sysctl_ctx_init(&bz->sysctl_tree);
1018209812Snwhitehorn	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
1019209812Snwhitehorn	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
1020209812Snwhitehorn	    CTLFLAG_RD, 0, "");
1021209812Snwhitehorn	if (bz->sysctl_tree_top == NULL) {
1022209812Snwhitehorn		sysctl_ctx_free(&bz->sysctl_tree);
1023209812Snwhitehorn		return (0);	/* XXX error code? */
1024209812Snwhitehorn	}
1025209812Snwhitehorn
1026209812Snwhitehorn	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1027209812Snwhitehorn	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1028209812Snwhitehorn	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
1029209812Snwhitehorn	    "Total bounce pages");
1030209812Snwhitehorn	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1031209812Snwhitehorn	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1032209812Snwhitehorn	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
1033209812Snwhitehorn	    "Free bounce pages");
1034209812Snwhitehorn	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1035209812Snwhitehorn	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1036209812Snwhitehorn	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
1037209812Snwhitehorn	    "Reserved bounce pages");
1038209812Snwhitehorn	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1039209812Snwhitehorn	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1040209812Snwhitehorn	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
1041209812Snwhitehorn	    "Active bounce pages");
1042209812Snwhitehorn	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1043209812Snwhitehorn	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1044209812Snwhitehorn	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
1045209812Snwhitehorn	    "Total bounce requests");
1046209812Snwhitehorn	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1047209812Snwhitehorn	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1048209812Snwhitehorn	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
1049209812Snwhitehorn	    "Total bounce requests that were deferred");
1050209812Snwhitehorn	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
1051209812Snwhitehorn	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1052209812Snwhitehorn	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
1053209812Snwhitehorn	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1054209812Snwhitehorn	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1055209812Snwhitehorn	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");
1056209812Snwhitehorn
1057209812Snwhitehorn	return (0);
1058209812Snwhitehorn}
1059209812Snwhitehorn
1060209812Snwhitehornstatic int
1061209812Snwhitehornalloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
1062209812Snwhitehorn{
1063209812Snwhitehorn	struct bounce_zone *bz;
1064209812Snwhitehorn	int count;
1065209812Snwhitehorn
1066209812Snwhitehorn	bz = dmat->bounce_zone;
1067209812Snwhitehorn	count = 0;
1068209812Snwhitehorn	while (numpages > 0) {
1069209812Snwhitehorn		struct bounce_page *bpage;
1070209812Snwhitehorn
1071209812Snwhitehorn		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
1072209812Snwhitehorn						     M_NOWAIT | M_ZERO);
1073209812Snwhitehorn
1074209812Snwhitehorn		if (bpage == NULL)
1075209812Snwhitehorn			break;
1076209812Snwhitehorn		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
1077209812Snwhitehorn							 M_NOWAIT, 0ul,
1078209812Snwhitehorn							 bz->lowaddr,
1079209812Snwhitehorn							 PAGE_SIZE,
1080209812Snwhitehorn							 0);
1081209812Snwhitehorn		if (bpage->vaddr == 0) {
1082209812Snwhitehorn			free(bpage, M_DEVBUF);
1083209812Snwhitehorn			break;
1084209812Snwhitehorn		}
1085209812Snwhitehorn		bpage->busaddr = pmap_kextract(bpage->vaddr);
1086209812Snwhitehorn		mtx_lock(&bounce_lock);
1087209812Snwhitehorn		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
1088209812Snwhitehorn		total_bpages++;
1089209812Snwhitehorn		bz->total_bpages++;
1090209812Snwhitehorn		bz->free_bpages++;
1091209812Snwhitehorn		mtx_unlock(&bounce_lock);
1092209812Snwhitehorn		count++;
1093209812Snwhitehorn		numpages--;
1094209812Snwhitehorn	}
1095209812Snwhitehorn	return (count);
1096209812Snwhitehorn}
1097209812Snwhitehorn
1098209812Snwhitehornstatic int
1099209812Snwhitehornreserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
1100209812Snwhitehorn{
1101209812Snwhitehorn	struct bounce_zone *bz;
1102209812Snwhitehorn	int pages;
1103209812Snwhitehorn
1104209812Snwhitehorn	mtx_assert(&bounce_lock, MA_OWNED);
1105209812Snwhitehorn	bz = dmat->bounce_zone;
1106209812Snwhitehorn	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
1107209812Snwhitehorn	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
1108209812Snwhitehorn		return (map->pagesneeded - (map->pagesreserved + pages));
1109209812Snwhitehorn	bz->free_bpages -= pages;
1110209812Snwhitehorn	bz->reserved_bpages += pages;
1111209812Snwhitehorn	map->pagesreserved += pages;
1112209812Snwhitehorn	pages = map->pagesneeded - map->pagesreserved;
1113209812Snwhitehorn
1114209812Snwhitehorn	return (pages);
1115209812Snwhitehorn}
1116209812Snwhitehorn
1117209812Snwhitehornstatic bus_addr_t
1118209812Snwhitehornadd_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
1119209812Snwhitehorn		bus_size_t size)
1120209812Snwhitehorn{
1121209812Snwhitehorn	struct bounce_zone *bz;
1122209812Snwhitehorn	struct bounce_page *bpage;
1123209812Snwhitehorn
1124209812Snwhitehorn	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
1125209812Snwhitehorn	KASSERT(map != NULL && map != &nobounce_dmamap,
1126209812Snwhitehorn	    ("add_bounce_page: bad map %p", map));
1127209812Snwhitehorn
1128209812Snwhitehorn	bz = dmat->bounce_zone;
1129209812Snwhitehorn	if (map->pagesneeded == 0)
1130209812Snwhitehorn		panic("add_bounce_page: map doesn't need any pages");
1131209812Snwhitehorn	map->pagesneeded--;
1132209812Snwhitehorn
1133209812Snwhitehorn	if (map->pagesreserved == 0)
1134209812Snwhitehorn		panic("add_bounce_page: map doesn't need any pages");
1135209812Snwhitehorn	map->pagesreserved--;
1136209812Snwhitehorn
1137209812Snwhitehorn	mtx_lock(&bounce_lock);
1138209812Snwhitehorn	bpage = STAILQ_FIRST(&bz->bounce_page_list);
1139209812Snwhitehorn	if (bpage == NULL)
1140209812Snwhitehorn		panic("add_bounce_page: free page list is empty");
1141209812Snwhitehorn
1142209812Snwhitehorn	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
1143209812Snwhitehorn	bz->reserved_bpages--;
1144209812Snwhitehorn	bz->active_bpages++;
1145209812Snwhitehorn	mtx_unlock(&bounce_lock);
1146209812Snwhitehorn
1147209812Snwhitehorn	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
1148209812Snwhitehorn		/* Page offset needs to be preserved. */
1149209812Snwhitehorn		bpage->vaddr |= vaddr & PAGE_MASK;
1150209812Snwhitehorn		bpage->busaddr |= vaddr & PAGE_MASK;
1151209812Snwhitehorn	}
1152209812Snwhitehorn	bpage->datavaddr = vaddr;
1153209812Snwhitehorn	bpage->datacount = size;
1154209812Snwhitehorn	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
1155209812Snwhitehorn	return (bpage->busaddr);
1156209812Snwhitehorn}
1157209812Snwhitehorn
1158209812Snwhitehornstatic void
1159209812Snwhitehornfree_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
1160209812Snwhitehorn{
1161209812Snwhitehorn	struct bus_dmamap *map;
1162209812Snwhitehorn	struct bounce_zone *bz;
1163209812Snwhitehorn
1164209812Snwhitehorn	bz = dmat->bounce_zone;
1165209812Snwhitehorn	bpage->datavaddr = 0;
1166209812Snwhitehorn	bpage->datacount = 0;
1167209812Snwhitehorn	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
1168209812Snwhitehorn		/*
1169209812Snwhitehorn		 * Reset the bounce page to start at offset 0.  Other uses
1170209812Snwhitehorn		 * of this bounce page may need to store a full page of
1171209812Snwhitehorn		 * data and/or assume it starts on a page boundary.
1172209812Snwhitehorn		 */
1173209812Snwhitehorn		bpage->vaddr &= ~PAGE_MASK;
1174209812Snwhitehorn		bpage->busaddr &= ~PAGE_MASK;
1175209812Snwhitehorn	}
1176209812Snwhitehorn
1177209812Snwhitehorn	mtx_lock(&bounce_lock);
1178209812Snwhitehorn	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
1179209812Snwhitehorn	bz->free_bpages++;
1180209812Snwhitehorn	bz->active_bpages--;
1181209812Snwhitehorn	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
1182209812Snwhitehorn		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
1183209812Snwhitehorn			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
1184209812Snwhitehorn			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
1185209812Snwhitehorn					   map, links);
1186209812Snwhitehorn			busdma_swi_pending = 1;
1187209812Snwhitehorn			bz->total_deferred++;
1188209812Snwhitehorn			swi_sched(vm_ih, 0);
1189209812Snwhitehorn		}
1190209812Snwhitehorn	}
1191209812Snwhitehorn	mtx_unlock(&bounce_lock);
1192209812Snwhitehorn}
1193209812Snwhitehorn
1194209812Snwhitehornvoid
1195209812Snwhitehornbusdma_swi(void)
1196209812Snwhitehorn{
1197209812Snwhitehorn	bus_dma_tag_t dmat;
1198209812Snwhitehorn	struct bus_dmamap *map;
1199209812Snwhitehorn
1200209812Snwhitehorn	mtx_lock(&bounce_lock);
1201209812Snwhitehorn	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
1202209812Snwhitehorn		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
1203209812Snwhitehorn		mtx_unlock(&bounce_lock);
1204209812Snwhitehorn		dmat = map->dmat;
1205209812Snwhitehorn		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
1206209812Snwhitehorn		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
1207209812Snwhitehorn				map->callback, map->callback_arg, /*flags*/0);
1208209812Snwhitehorn		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
1209209812Snwhitehorn		mtx_lock(&bounce_lock);
1210209812Snwhitehorn	}
1211209812Snwhitehorn	mtx_unlock(&bounce_lock);
1212209812Snwhitehorn}
1213