/*-
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/i386/busdma_machdep.c 162607 2006-09-24 19:24:26Z imp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/md_var.h>

#define MAX_BPAGES 512
#define BUS_DMA_USE_FILTER	BUS_DMA_BUS2
#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4

struct bounce_zone;

struct bus_dma_tag {
	bus_dma_tag_t	  parent;
	bus_size_t	  alignment;
	bus_size_t	  boundary;
	bus_addr_t	  lowaddr;
	bus_addr_t	  highaddr;
	bus_dma_filter_t *filter;
	void		 *filterarg;
	bus_size_t	  maxsize;
	u_int		  nsegments;
	bus_size_t	  maxsegsz;
	int		  flags;
	int		  ref_count;
	int		  map_count;
	bus_dma_lock_t	 *lockfunc;
	void		 *lockfuncarg;
	bus_dma_segment_t *segments;
	struct bounce_zone *bounce_zone;
};

struct bounce_page {
	vm_offset_t	vaddr;		/* kva of bounce buffer */
	bus_addr_t	busaddr;	/* Physical address */
	vm_offset_t	datavaddr;	/* kva of client data */
	bus_size_t	datacount;	/* client data count */
	STAILQ_ENTRY(bounce_page) links;
};

int busdma_swi_pending;

struct bounce_zone {
	STAILQ_ENTRY(bounce_zone) links;
	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
	int		total_bpages;
	int		free_bpages;
	int		reserved_bpages;
	int		active_bpages;
	int		total_bounced;
	int		total_deferred;
	bus_size_t	alignment;
	bus_size_t	boundary;
	bus_addr_t	lowaddr;
	char		zoneid[8];
	char		lowaddrid[20];
	struct sysctl_ctx_list sysctl_tree;
	struct sysctl_oid *sysctl_tree_top;
};

static struct mtx bounce_lock;
static int total_bpages;
static int busdma_zonecount;
static STAILQ_HEAD(, bounce_zone) bounce_zone_list;

SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
	   "Total bounce pages");

struct bus_dmamap {
	struct bp_list	       bpages;
	int		       pagesneeded;
	int		       pagesreserved;
	bus_dma_tag_t	       dmat;
	void		      *buf;		/* unmapped buffer pointer */
	bus_size_t	       buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void		      *callback_arg;
	STAILQ_ENTRY(bus_dmamap) links;
};

static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
static struct bus_dmamap nobounce_dmamap;

static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
				int commit);
static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
				   vm_offset_t vaddr, bus_size_t size);
static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr);
int _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
			    bus_size_t buflen, int flags, int *nb);

/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match; if there is no filter callback then assume a match.
 */
int
run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
{
	int retval;

	retval = 0;

	do {
		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
		 || ((paddr & (dmat->alignment - 1)) != 0))
		 && (dmat->filter == NULL
		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
			retval = 1;

		dmat = dmat->parent;
	} while (retval == 0 && dmat != NULL);
	return (retval);
}
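
/*
 * Example: for a tag with lowaddr = BUS_SPACE_MAXADDR_24BIT (the 16MB
 * ISA limit), highaddr = BUS_SPACE_MAXADDR, alignment = 1 and no filter
 * callback, a page at physical address 0x2000000 lies inside the
 * exclusion window (paddr > lowaddr && paddr <= highaddr) and so must
 * be bounced, while a page at 0x800000 passes both the range and the
 * alignment checks and may be used directly.
 */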

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant as dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	panic("driver error: busdma dflt_lock called");
}

/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Basic sanity checking */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	newtag->segments = NULL;

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);
		if ((newtag->filter != NULL) ||
		    ((parent->flags & BUS_DMA_USE_FILTER) != 0))
			newtag->flags |= BUS_DMA_USE_FILTER;
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem)
	 || newtag->alignment > 1)
		newtag->flags |= BUS_DMA_COULD_BOUNCE;

	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
	    (flags & BUS_DMA_ALLOCNOW) != 0) {
		struct bounce_zone *bz;

		/* Must bounce */

		if ((error = alloc_bounce_zone(newtag)) != 0) {
			free(newtag, M_DEVBUF);
			return (error);
		}
		bz = newtag->bounce_zone;

		if (ptoa(bz->total_bpages) < maxsize) {
			int pages;

			pages = atop(maxsize) - bz->total_bpages;

			/* Add pages to our bounce pool */
			if (alloc_bounce_pages(newtag, pages) < pages)
				error = ENOMEM;
		}
		/* Performed initial allocation */
		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
	}

	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
	return (error);
}
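
/*
 * Example (hypothetical driver code; the "foo" names are illustrative
 * and not part of this file): a device limited to 32-bit DMA addresses
 * and single-segment transfers of up to MCLBYTES might create its tag
 * as follows, using busdma_lock_mutex so that deferred load callbacks
 * run under the driver's own mutex:
 *
 *	error = bus_dma_tag_create(NULL,	// parent
 *	    1, 0,				// alignment, boundary
 *	    BUS_SPACE_MAXADDR_32BIT,		// lowaddr
 *	    BUS_SPACE_MAXADDR,			// highaddr
 *	    NULL, NULL,				// filter, filterarg
 *	    MCLBYTES, 1, MCLBYTES,		// maxsize, nsegments, maxsegsz
 *	    0,					// flags
 *	    busdma_lock_mutex, &sc->foo_mtx,	// lockfunc, lockfuncarg
 *	    &sc->foo_dmat);
 */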

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t dmat_copy;
	int error;

	error = 0;
	dmat_copy = dmat;

	if (dmat != NULL) {

		if (dmat->map_count != 0) {
			error = EBUSY;
			goto out;
		}

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				if (dmat->segments != NULL)
					free(dmat->segments, M_DEVBUF);
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
out:
	CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error);
	return (error);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	int error;

	error = 0;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}
	}

	/*
	 * Bouncing might be required if the driver asks for an active
	 * exclusion region, a data alignment that is stricter than 1, and/or
	 * an active address boundary.
	 */
	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {

		/* Must bounce */
		struct bounce_zone *bz;
		int maxpages;

		if (dmat->bounce_zone == NULL) {
			if ((error = alloc_bounce_zone(dmat)) != 0)
				return (error);
		}
		bz = dmat->bounce_zone;

		*mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
					     M_NOWAIT | M_ZERO);
		if (*mapp == NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, ENOMEM);
			return (ENOMEM);
		}

		/* Initialize the new map */
		STAILQ_INIT(&((*mapp)->bpages));

		/*
		 * Attempt to add pages to our pool on a per-instance
		 * basis up to a sane limit.
		 */
		if (dmat->alignment > 1)
			maxpages = MAX_BPAGES;
		else
			maxpages = MIN(MAX_BPAGES, Maxmem - atop(dmat->lowaddr));
		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
		 || (dmat->map_count > 0 && bz->total_bpages < maxpages)) {
			int pages;

			pages = MAX(atop(dmat->maxsize), 1);
			pages = MIN(maxpages - bz->total_bpages, pages);
			pages = MAX(pages, 1);
			if (alloc_bounce_pages(dmat, pages) < pages)
				error = ENOMEM;

			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
				if (error == 0)
					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
			} else {
				error = 0;
			}
		}
	} else {
		*mapp = NULL;
	}
	if (error == 0)
		dmat->map_count++;
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);
	return (error);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL && map != &nobounce_dmamap) {
		if (STAILQ_FIRST(&map->bpages) != NULL) {
			CTR3(KTR_BUSDMA, "%s: tag %p error %d",
			    __func__, dmat, EBUSY);
			return (EBUSY);
		}
		free(map, M_DEVBUF);
	}
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
		 bus_dmamap_t *mapp)
{
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	/* If we succeed, no mapping/bouncing will be required */
	*mapp = NULL;

	if (dmat->segments == NULL) {
		dmat->segments = (bus_dma_segment_t *)malloc(
		    sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
		    M_NOWAIT);
		if (dmat->segments == NULL) {
			CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
			    __func__, dmat, dmat->flags, ENOMEM);
			return (ENOMEM);
		}
	}

	/*
	 * XXX:
	 * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact
	 * alignment guarantees of malloc need to be nailed down, and the
	 * code below should be rewritten to take that into account.
	 *
	 * In the meantime, we'll warn the user if malloc gets it wrong.
	 */
	if ((dmat->maxsize <= PAGE_SIZE) &&
	   (dmat->alignment < dmat->maxsize) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem)) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 * XXX Certain AGP hardware does.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	} else if ((uintptr_t)*vaddr & (dmat->alignment - 1)) {
		printf("bus_dmamem_alloc failed to align memory properly.\n");
	}
	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, 0);
	return (0);
}
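
/*
 * Example (hypothetical driver code; "foo" names are illustrative): the
 * usual pattern is to allocate DMA-safe memory here and then recover
 * its bus address through a load callback.  Because memory from
 * bus_dmamem_alloc() never bounces, the load completes synchronously
 * and the callback runs before bus_dmamap_load() returns:
 *
 *	static void
 *	foo_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		if (error == 0)
 *			*(bus_addr_t *)arg = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamem_alloc(sc->foo_dmat, (void **)&sc->foo_ring,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->foo_map);
 *	if (error == 0)
 *		error = bus_dmamap_load(sc->foo_dmat, sc->foo_map,
 *		    sc->foo_ring, FOO_RING_SIZE, foo_dmamap_cb,
 *		    &sc->foo_ring_busaddr, 0);
 */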

/*
 * Free a piece of memory and its associated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * dmamem does not need to be bounced, so the map should be
	 * NULL
	 */
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if ((dmat->maxsize <= PAGE_SIZE) &&
	   (dmat->alignment < dmat->maxsize) &&
	    dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem))
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

int
_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
			bus_size_t buflen, int flags, int *nb)
{
	vm_offset_t vaddr;
	vm_offset_t vendaddr;
	bus_addr_t paddr;
	int needbounce = *nb;

	if (map != &nobounce_dmamap && map->pagesneeded == 0) {
		CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
		    "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
		    dmat->boundary, dmat->alignment);
		CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
		    map, &nobounce_dmamap, map->pagesneeded);
		/*
		 * Count the number of bounce pages
		 * needed in order to complete this transfer
		 */
		vaddr = trunc_page((vm_offset_t)buf);
		vendaddr = (vm_offset_t)buf + buflen;

		while (vaddr < vendaddr) {
			paddr = pmap_kextract(vaddr);
			if (((dmat->flags & BUS_DMA_USE_FILTER) != 0) &&
			    run_filter(dmat, paddr) != 0) {
				needbounce = 1;
				map->pagesneeded++;
			}
			vaddr += PAGE_SIZE;
		}
		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
	}

	/* Reserve Necessary Bounce Pages */
	if (map->pagesneeded != 0) {
		mtx_lock(&bounce_lock);
		if (flags & BUS_DMA_NOWAIT) {
			if (reserve_bounce_pages(dmat, map, 0) != 0) {
				mtx_unlock(&bounce_lock);
				return (ENOMEM);
			}
		} else {
			if (reserve_bounce_pages(dmat, map, 1) != 0) {
				/* Queue us for resources */
				map->dmat = dmat;
				map->buf = buf;
				map->buflen = buflen;
				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
				    map, links);
				mtx_unlock(&bounce_lock);
				return (EINPROGRESS);
			}
		}
		mtx_unlock(&bounce_lock);
	}

	*nb = needbounce;
	return (0);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static __inline int
_bus_dmamap_load_buffer(bus_dma_tag_t dmat,
			bus_dmamap_t map,
			void *buf, bus_size_t buflen,
			pmap_t pmap,
			int flags,
			bus_addr_t *lastaddrp,
			bus_dma_segment_t *segs,
			int *segp,
			int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr;
	int needbounce = 0;
	int seg, error;

	if (map == NULL)
		map = &nobounce_dmamap;

	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
		error = _bus_dmamap_count_pages(dmat, map, buf, buflen, flags,
		    &needbounce);
		if (error)
			return (error);
	}

	vaddr = (vm_offset_t)buf;
	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		if (((dmat->flags & BUS_DMA_USE_FILTER) != 0) &&
		    map->pagesneeded != 0 && run_filter(dmat, curaddr))
			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);

		/*
		 * Insert chunk into a segment, coalescing with
		 * previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (needbounce == 0 && curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
		bus_size_t buflen, bus_dmamap_callback_t *callback,
		void *callback_arg, int flags)
{
	bus_addr_t		lastaddr = 0;
	int			error, nsegs = 0;

	if (map != NULL) {
		flags |= BUS_DMA_WAITOK;
		map->callback = callback;
		map->callback_arg = callback_arg;
	}

	error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
	     &lastaddr, dmat->segments, &nsegs, 1);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);

	if (error == EINPROGRESS) {
		return (error);
	}

	if (error)
		(*callback)(callback_arg, dmat->segments, 0, error);
	else
		(*callback)(callback_arg, dmat->segments, nsegs + 1, 0);

	/*
	 * Return ENOMEM to the caller so that it can pass it up the stack.
	 * This error only happens when NOWAIT is set, so deferral is disabled.
	 */
	if (error == ENOMEM)
		return (error);

	return (0);
}
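
/*
 * Deferred loads: if the map needs bounce pages that are not yet
 * available and BUS_DMA_NOWAIT was not given, the load above returns
 * EINPROGRESS and the map waits on bounce_map_waitinglist.  Once
 * free_bounce_page() can complete the reservation, busdma_swi()
 * replays the load and the callback finally runs with the tag's
 * lockfunc held.  Callers that cannot tolerate this asynchrony should
 * pass BUS_DMA_NOWAIT and handle ENOMEM instead.
 */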

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
static __inline int
_bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			int flags)
{
	int error;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	*nsegs = 0;
	error = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		bus_addr_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = _bus_dmamap_load_buffer(dmat, map,
						m->m_data, m->m_len,
						NULL, flags, &lastaddr,
						segs, nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	/* XXX FIXME: Having to increment nsegs is really annoying */
	++*nsegs;
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, *nsegs);
	return (error);
}

int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
		     struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
	int nsegs, error;

	error = _bus_dmamap_load_mbuf_sg(dmat, map, m0, dmat->segments, &nsegs,
	    flags);

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
			    nsegs, m0->m_pkthdr.len, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs);
	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			int flags)
{
	return (_bus_dmamap_load_mbuf_sg(dmat, map, m0, segs, nsegs, flags));
}
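
/*
 * Example (hypothetical driver code; FOO_NTXSEGS is illustrative): a
 * network driver typically loads a transmit mbuf chain into a
 * caller-provided segment array sized to the tag's nsegments:
 *
 *	bus_dma_segment_t segs[FOO_NTXSEGS];
 *	int nsegs, error;
 *
 *	error = bus_dmamap_load_mbuf_sg(sc->foo_tx_dmat, txd->tx_map,
 *	    m0, segs, &nsegs, BUS_DMA_NOWAIT);
 *	if (error == EFBIG) {
 *		// too many segments: defragment the chain and retry
 *	}
 */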

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
		    struct uio *uio,
		    bus_dmamap_callback2_t *callback, void *callback_arg,
		    int flags)
{
	bus_addr_t lastaddr;
	int nsegs, error, first, i;
	bus_size_t resid;
	struct iovec *iov;
	pmap_t pmap;

	flags |= BUS_DMA_NOWAIT;
	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
			("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = NULL;

	nsegs = 0;
	error = 0;
	first = 1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
			resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = _bus_dmamap_load_buffer(dmat, map,
					addr, minlen, pmap, flags, &lastaddr,
					dmat->segments, &nsegs, first);
			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/* force "no valid mappings" in callback */
		(*callback)(callback_arg, dmat->segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dmat->segments,
			    nsegs+1, uio->uio_resid, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct bounce_page *bpage;

	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		STAILQ_REMOVE_HEAD(&map->bpages, links);
		free_bounce_page(dmat, bpage);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct bounce_page *bpage;

	if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
		/*
		 * Handle data bouncing.  We might also
		 * want to add support for invalidating
		 * the caches on broken hardware
		 */
		dmat->bounce_zone->total_bounced++;
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x "
		    "performing bounce", __func__, dmat, dmat->flags, op);

		if (op & BUS_DMASYNC_PREWRITE) {
			while (bpage != NULL) {
				bcopy((void *)bpage->datavaddr,
				      (void *)bpage->vaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}

		if (op & BUS_DMASYNC_POSTREAD) {
			while (bpage != NULL) {
				bcopy((void *)bpage->vaddr,
				      (void *)bpage->datavaddr,
				      bpage->datacount);
				bpage = STAILQ_NEXT(bpage, links);
			}
		}
	}
}
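
/*
 * Example: the sync discipline that makes bouncing transparent to a
 * correctly written driver is
 *
 *	bus_dmamap_sync(dmat, map, BUS_DMASYNC_PREWRITE);
 *	// ... start DMA and wait for the device to finish ...
 *	bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD);
 *	bus_dmamap_unload(dmat, map);
 *
 * PREWRITE copies the client data into the bounce pages before the
 * device reads them; POSTREAD copies the device's data back into the
 * client buffer before the CPU looks at it.
 */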

static void
init_bounce_pages(void *dummy __unused)
{

	total_bpages = 0;
	STAILQ_INIT(&bounce_zone_list);
	STAILQ_INIT(&bounce_map_waitinglist);
	STAILQ_INIT(&bounce_map_callbacklist);
	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
}
SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);

static struct sysctl_ctx_list *
busdma_sysctl_tree(struct bounce_zone *bz)
{
	return (&bz->sysctl_tree);
}

static struct sysctl_oid *
busdma_sysctl_tree_top(struct bounce_zone *bz)
{
	return (bz->sysctl_tree_top);
}

static int
alloc_bounce_zone(bus_dma_tag_t dmat)
{
	struct bounce_zone *bz;

	/* Check to see if we already have a suitable zone */
	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
		if ((dmat->alignment <= bz->alignment)
		 && (dmat->boundary <= bz->boundary)
		 && (dmat->lowaddr >= bz->lowaddr)) {
			dmat->bounce_zone = bz;
			return (0);
		}
	}

	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	STAILQ_INIT(&bz->bounce_page_list);
	bz->free_bpages = 0;
	bz->reserved_bpages = 0;
	bz->active_bpages = 0;
	bz->lowaddr = dmat->lowaddr;
	bz->alignment = dmat->alignment;
	bz->boundary = dmat->boundary;
	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
	busdma_zonecount++;
	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
	dmat->bounce_zone = bz;

	sysctl_ctx_init(&bz->sysctl_tree);
	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
	    CTLFLAG_RD, 0, "");
	if (bz->sysctl_tree_top == NULL) {
		sysctl_ctx_free(&bz->sysctl_tree);
		return (0);	/* XXX error code? */
	}

	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
	    "Total bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
	    "Free bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
	    "Reserved bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
	    "Active bounce pages");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
	    "Total bounce requests");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
	    "Total bounce requests that were deferred");
	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");
	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
	    "boundary", CTLFLAG_RD, &bz->boundary, 0, "");

	return (0);
}

static int
alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
{
	struct bounce_zone *bz;
	int count;

	bz = dmat->bounce_zone;
	count = 0;
	while (numpages > 0) {
		struct bounce_page *bpage;

		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
						     M_NOWAIT | M_ZERO);

		if (bpage == NULL)
			break;
		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
							 M_NOWAIT, 0ul,
							 bz->lowaddr,
							 PAGE_SIZE,
							 bz->boundary);
		if (bpage->vaddr == 0) {
			free(bpage, M_DEVBUF);
			break;
		}
		bpage->busaddr = pmap_kextract(bpage->vaddr);
		mtx_lock(&bounce_lock);
		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
		total_bpages++;
		bz->total_bpages++;
		bz->free_bpages++;
		mtx_unlock(&bounce_lock);
		count++;
		numpages--;
	}
	return (count);
}
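
/*
 * Reserve bounce pages from the tag's zone for a pending mapping.  With
 * commit == 0 the reservation is all-or-nothing: if the zone cannot
 * cover map->pagesneeded, the shortfall is returned and nothing is
 * taken.  With commit != 0, whatever is free is reserved immediately
 * and the remaining deficit is returned, to be made up as pages are
 * freed.  A return value of zero means the reservation is complete.
 */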
static int
reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
{
	struct bounce_zone *bz;
	int pages;

	mtx_assert(&bounce_lock, MA_OWNED);
	bz = dmat->bounce_zone;
	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
		return (map->pagesneeded - (map->pagesreserved + pages));
	bz->free_bpages -= pages;
	bz->reserved_bpages += pages;
	map->pagesreserved += pages;
	pages = map->pagesneeded - map->pagesreserved;

	return (pages);
}

static bus_addr_t
add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
		bus_size_t size)
{
	struct bounce_zone *bz;
	struct bounce_page *bpage;

	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
	KASSERT(map != NULL && map != &nobounce_dmamap,
	    ("add_bounce_page: bad map %p", map));

	bz = dmat->bounce_zone;
	if (map->pagesneeded == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesneeded--;

	if (map->pagesreserved == 0)
		panic("add_bounce_page: map doesn't need any pages");
	map->pagesreserved--;

	mtx_lock(&bounce_lock);
	bpage = STAILQ_FIRST(&bz->bounce_page_list);
	if (bpage == NULL)
		panic("add_bounce_page: free page list is empty");

	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
	bz->reserved_bpages--;
	bz->active_bpages++;
	mtx_unlock(&bounce_lock);

	bpage->datavaddr = vaddr;
	bpage->datacount = size;
	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
	return (bpage->busaddr);
}

static void
free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
{
	struct bus_dmamap *map;
	struct bounce_zone *bz;

	bz = dmat->bounce_zone;
	bpage->datavaddr = 0;
	bpage->datacount = 0;

	mtx_lock(&bounce_lock);
	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
	bz->free_bpages++;
	bz->active_bpages--;
	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
					   map, links);
			busdma_swi_pending = 1;
			bz->total_deferred++;
			swi_sched(vm_ih, 0);
		}
	}
	mtx_unlock(&bounce_lock);
}

void
busdma_swi(void)
{
	bus_dma_tag_t dmat;
	struct bus_dmamap *map;

	mtx_lock(&bounce_lock);
	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
		mtx_unlock(&bounce_lock);
		dmat = map->dmat;
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
		bus_dmamap_load(map->dmat, map, map->buf, map->buflen,
				map->callback, map->callback_arg, /*flags*/0);
		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
		mtx_lock(&bounce_lock);
	}
	mtx_unlock(&bounce_lock);
}
1189147191Sjkoshy