busdma_machdep.c revision 204689
1/*-
2 * Copyright (c) 2006 Oleksandr Tymoshenko
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions, and the following disclaimer,
10 *    without modification, immediately at the beginning of the file.
11 * 2. The name of the author may not be used to endorse or promote products
12 *    derived from this software without specific prior written permission.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
18 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 *  From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
27 */
28
29#include <sys/cdefs.h>
30__FBSDID("$FreeBSD: head/sys/mips/mips/busdma_machdep.c 204689 2010-03-04 05:23:08Z neel $");
31
32/*
33 * MIPS bus dma support routines
34 */
35
36#include <sys/param.h>
37#include <sys/systm.h>
38#include <sys/malloc.h>
39#include <sys/bus.h>
40#include <sys/interrupt.h>
41#include <sys/lock.h>
42#include <sys/proc.h>
43#include <sys/mutex.h>
44#include <sys/mbuf.h>
45#include <sys/uio.h>
46#include <sys/ktr.h>
47#include <sys/kernel.h>
48#include <sys/sysctl.h>
49
50#include <vm/vm.h>
51#include <vm/vm_page.h>
52#include <vm/vm_map.h>
53
54#include <machine/atomic.h>
55#include <machine/bus.h>
56#include <machine/cache.h>
57#include <machine/cpufunc.h>
58#include <machine/cpuinfo.h>
59#include <machine/md_var.h>
60
61#define MAX_BPAGES 64
62#define BUS_DMA_COULD_BOUNCE	BUS_DMA_BUS3
63#define BUS_DMA_MIN_ALLOC_COMP	BUS_DMA_BUS4
64
65struct bounce_zone;
66
67struct bus_dma_tag {
68	bus_dma_tag_t		parent;
69	bus_size_t		alignment;
70	bus_size_t		boundary;
71	bus_addr_t		lowaddr;
72	bus_addr_t		highaddr;
73	bus_dma_filter_t	*filter;
74	void			*filterarg;
75	bus_size_t		maxsize;
76	u_int			nsegments;
77	bus_size_t		maxsegsz;
78	int			flags;
79	int			ref_count;
80	int			map_count;
81	bus_dma_lock_t		*lockfunc;
82	void			*lockfuncarg;
83	struct bounce_zone *bounce_zone;
84};
85
86struct bounce_page {
87	vm_offset_t	vaddr;		/* kva of bounce buffer */
88	vm_offset_t	vaddr_nocache;	/* kva of bounce buffer uncached */
89	bus_addr_t	busaddr;	/* Physical address */
90	vm_offset_t	datavaddr;	/* kva of client data */
91	bus_size_t	datacount;	/* client data count */
92	STAILQ_ENTRY(bounce_page) links;
93};
94
95int busdma_swi_pending;
96
97struct bounce_zone {
98	STAILQ_ENTRY(bounce_zone) links;
99	STAILQ_HEAD(bp_list, bounce_page) bounce_page_list;
100	int		total_bpages;
101	int		free_bpages;
102	int		reserved_bpages;
103	int		active_bpages;
104	int		total_bounced;
105	int		total_deferred;
106	int		map_count;
107	bus_size_t	alignment;
108	bus_addr_t	lowaddr;
109	char		zoneid[8];
110	char		lowaddrid[20];
111	struct sysctl_ctx_list sysctl_tree;
112	struct sysctl_oid *sysctl_tree_top;
113};
114
115static struct mtx bounce_lock;
116static int total_bpages;
117static int busdma_zonecount;
118static STAILQ_HEAD(, bounce_zone) bounce_zone_list;
119
120SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
121SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
122	   "Total bounce pages");
123
124#define DMAMAP_LINEAR		0x1
125#define DMAMAP_MBUF		0x2
126#define DMAMAP_UIO		0x4
127#define DMAMAP_TYPE_MASK	(DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)
128#define DMAMAP_UNCACHEABLE	0x8
129#define DMAMAP_ALLOCATED	0x10
130#define DMAMAP_MALLOCUSED	0x20
131
132struct bus_dmamap {
133	struct bp_list	bpages;
134	int		pagesneeded;
135	int		pagesreserved;
136        bus_dma_tag_t	dmat;
137	int		flags;
138	void 		*buffer;
139	void		*origbuffer;
140	void		*allocbuffer;
141	TAILQ_ENTRY(bus_dmamap)	freelist;
142	int		len;
143	STAILQ_ENTRY(bus_dmamap) links;
144	bus_dmamap_callback_t *callback;
145	void		      *callback_arg;
146
147};
148
149static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
150static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
151
152static TAILQ_HEAD(,bus_dmamap) dmamap_freelist =
153	TAILQ_HEAD_INITIALIZER(dmamap_freelist);
154
155#define BUSDMA_STATIC_MAPS	500
156static struct bus_dmamap map_pool[BUSDMA_STATIC_MAPS];
157
158static struct mtx busdma_mtx;
159
160MTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF);
161
162static void init_bounce_pages(void *dummy);
163static int alloc_bounce_zone(bus_dma_tag_t dmat);
164static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages);
165static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map,
166				int commit);
167static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map,
168				   vm_offset_t vaddr, bus_size_t size);
169static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage);
170
171/* Default tag, as most drivers provide no parent tag. */
172bus_dma_tag_t mips_root_dma_tag;
173
/*
 * Return true if a match is made.
 *
 * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'.
 *
 * If paddr is within the bounds of the dma tag then call the filter callback
 * to check for a match; if there is no filter callback then assume a match.
 */
182static int
183run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
184{
185	int retval;
186
187	retval = 0;
188
189	do {
190		if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
191		 || ((paddr & (dmat->alignment - 1)) != 0))
192		 && (dmat->filter == NULL
193		  || (*dmat->filter)(dmat->filterarg, paddr) != 0))
194			retval = 1;
195
196		dmat = dmat->parent;
197	} while (retval == 0 && dmat != NULL);
198	return (retval);
199}
200
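/*
 * Populate the dmamap free list from the statically allocated map pool.
 */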
201static void
202mips_dmamap_freelist_init(void *dummy)
203{
204	int i;
205
206	for (i = 0; i < BUSDMA_STATIC_MAPS; i++)
207		TAILQ_INSERT_HEAD(&dmamap_freelist, &map_pool[i], freelist);
208}
209
210SYSINIT(busdma, SI_SUB_VM, SI_ORDER_ANY, mips_dmamap_freelist_init, NULL);
211
212/*
213 * Check to see if the specified page is in an allowed DMA range.
214 */
215
216static __inline int
217bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
218    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
219    int flags, vm_offset_t *lastaddrp, int *segp);
220
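/*
 * Return 1 if the exclusion window described by (lowaddr, highaddr) overlaps
 * any of the physical memory ranges in phys_avail[], meaning bouncing could
 * be required for a tag with these limits; return 0 otherwise.
 */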
221static __inline int
222_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
223{
224	int i;
225	for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
226		if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1])
227		    || (lowaddr < phys_avail[i] &&
228		    highaddr > phys_avail[i]))
229			return (1);
230	}
231	return (0);
232}
233
/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should pass &Giant as dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
240void
241busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
242{
243	struct mtx *dmtx;
244
245	dmtx = (struct mtx *)arg;
246	switch (op) {
247	case BUS_DMA_LOCK:
248		mtx_lock(dmtx);
249		break;
250	case BUS_DMA_UNLOCK:
251		mtx_unlock(dmtx);
252		break;
253	default:
254		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
255	}
256}
257
/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
264static void
265dflt_lock(void *arg, bus_dma_lock_op_t op)
266{
267#ifdef INVARIANTS
268	panic("driver error: busdma dflt_lock called");
269#else
270	printf("DRIVER_ERROR: busdma dflt_lock called\n");
271#endif
272}
273
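/*
 * Allocate a dmamap, preferably from the static pool; fall back to malloc(9)
 * (and mark the map DMAMAP_ALLOCATED) when the free list is empty.
 */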
274static __inline bus_dmamap_t
275_busdma_alloc_dmamap(void)
276{
277	bus_dmamap_t map;
278
279	mtx_lock(&busdma_mtx);
280	map = TAILQ_FIRST(&dmamap_freelist);
281	if (map)
282		TAILQ_REMOVE(&dmamap_freelist, map, freelist);
283	mtx_unlock(&busdma_mtx);
284	if (!map) {
285		map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT | M_ZERO);
286		if (map)
287			map->flags = DMAMAP_ALLOCATED;
288	} else
289		map->flags = 0;
290	STAILQ_INIT(&map->bpages);
291	return (map);
292}
293
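/*
 * Return a dmamap to the static free list, or free(9) it if it was
 * allocated with malloc(9).
 */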
294static __inline void
295_busdma_free_dmamap(bus_dmamap_t map)
296{
297	if (map->flags & DMAMAP_ALLOCATED)
298		free(map, M_DEVBUF);
299	else {
300		mtx_lock(&busdma_mtx);
301		TAILQ_INSERT_HEAD(&dmamap_freelist, map, freelist);
302		mtx_unlock(&busdma_mtx);
303	}
304}
305
306/*
307 * Allocate a device specific dma_tag.
308 */
309#define SEG_NB 1024
310
311int
312bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
313		   bus_size_t boundary, bus_addr_t lowaddr,
314		   bus_addr_t highaddr, bus_dma_filter_t *filter,
315		   void *filterarg, bus_size_t maxsize, int nsegments,
316		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
317		   void *lockfuncarg, bus_dma_tag_t *dmat)
318{
319	bus_dma_tag_t newtag;
320	int error = 0;
321	/* Return a NULL tag on failure */
322	*dmat = NULL;
323	if (!parent)
324		parent = mips_root_dma_tag;
325
326	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
327	if (newtag == NULL) {
328		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
329		    __func__, newtag, 0, error);
330		return (ENOMEM);
331	}
332
333	newtag->parent = parent;
334	newtag->alignment = alignment;
335	newtag->boundary = boundary;
336	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
337	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
338	newtag->filter = filter;
339	newtag->filterarg = filterarg;
340        newtag->maxsize = maxsize;
341        newtag->nsegments = nsegments;
342	newtag->maxsegsz = maxsegsz;
343	newtag->flags = flags;
344	if (cpuinfo.cache_coherent_dma)
345		newtag->flags |= BUS_DMA_COHERENT;
346	newtag->ref_count = 1; /* Count ourself */
347	newtag->map_count = 0;
348	if (lockfunc != NULL) {
349		newtag->lockfunc = lockfunc;
350		newtag->lockfuncarg = lockfuncarg;
351	} else {
352		newtag->lockfunc = dflt_lock;
353		newtag->lockfuncarg = NULL;
354	}
355        /*
356	 * Take into account any restrictions imposed by our parent tag
357	 */
358        if (parent != NULL) {
359                newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
360                newtag->highaddr = max(parent->highaddr, newtag->highaddr);
361		if (newtag->boundary == 0)
362			newtag->boundary = parent->boundary;
363		else if (parent->boundary != 0)
364                	newtag->boundary = min(parent->boundary,
365					       newtag->boundary);
366		if ((newtag->filter != NULL) ||
367		    ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0))
368			newtag->flags |= BUS_DMA_COULD_BOUNCE;
369                if (newtag->filter == NULL) {
370                        /*
371                         * Short circuit looking at our parent directly
372                         * since we have encapsulated all of its information
373                         */
374                        newtag->filter = parent->filter;
375                        newtag->filterarg = parent->filterarg;
376                        newtag->parent = parent->parent;
377		}
378		if (newtag->parent != NULL)
379			atomic_add_int(&parent->ref_count, 1);
380	}
381	if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr)
382	 || newtag->alignment > 1)
383		newtag->flags |= BUS_DMA_COULD_BOUNCE;
384
385	if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
386	    (flags & BUS_DMA_ALLOCNOW) != 0) {
387		struct bounce_zone *bz;
388
389		/* Must bounce */
390
391		if ((error = alloc_bounce_zone(newtag)) != 0) {
392			free(newtag, M_DEVBUF);
393			return (error);
394		}
395		bz = newtag->bounce_zone;
396
397		if (ptoa(bz->total_bpages) < maxsize) {
398			int pages;
399
400			pages = atop(maxsize) - bz->total_bpages;
401
402			/* Add pages to our bounce pool */
403			if (alloc_bounce_pages(newtag, pages) < pages)
404				error = ENOMEM;
405		}
406		/* Performed initial allocation */
407		newtag->flags |= BUS_DMA_MIN_ALLOC_COMP;
408	} else
409		newtag->bounce_zone = NULL;
410	if (error != 0)
411		free(newtag, M_DEVBUF);
412	else
413		*dmat = newtag;
414	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
415	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
416
417	return (error);
418}
419
420int
421bus_dma_tag_destroy(bus_dma_tag_t dmat)
422{
423#ifdef KTR
424	bus_dma_tag_t dmat_copy = dmat;
425#endif
426
427	if (dmat != NULL) {
428
429                if (dmat->map_count != 0)
430                        return (EBUSY);
431
432                while (dmat != NULL) {
433                        bus_dma_tag_t parent;
434
435                        parent = dmat->parent;
436                        atomic_subtract_int(&dmat->ref_count, 1);
437                        if (dmat->ref_count == 0) {
438                                free(dmat, M_DEVBUF);
439                                /*
440                                 * Last reference count, so
441                                 * release our reference
442                                 * count on our parent.
443                                 */
444                                dmat = parent;
445                        } else
446                                dmat = NULL;
447                }
448        }
449	CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy);
450
451        return (0);
452}
453
454#include <sys/kdb.h>
455/*
456 * Allocate a handle for mapping from kva/uva/physical
457 * address space into bus device space.
458 */
459int
460bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
461{
462	bus_dmamap_t newmap;
463	int error = 0;
464
465	newmap = _busdma_alloc_dmamap();
466	if (newmap == NULL) {
467		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
468		return (ENOMEM);
469	}
470	*mapp = newmap;
471	newmap->dmat = dmat;
472	newmap->allocbuffer = NULL;
473	dmat->map_count++;
474
475	/*
476	 * Bouncing might be required if the driver asks for an active
477	 * exclusion region, a data alignment that is stricter than 1, and/or
478	 * an active address boundary.
479	 */
480	if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
481
482		/* Must bounce */
483		struct bounce_zone *bz;
484		int maxpages;
485
486		if (dmat->bounce_zone == NULL) {
487			if ((error = alloc_bounce_zone(dmat)) != 0) {
488				_busdma_free_dmamap(newmap);
489				*mapp = NULL;
490				return (error);
491			}
492		}
493		bz = dmat->bounce_zone;
494
495		/* Initialize the new map */
496		STAILQ_INIT(&((*mapp)->bpages));
497
498		/*
499		 * Attempt to add pages to our pool on a per-instance
500		 * basis up to a sane limit.
501		 */
502		maxpages = MAX_BPAGES;
503		if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0
504		 || (bz->map_count > 0 && bz->total_bpages < maxpages)) {
505			int pages;
506
507			pages = MAX(atop(dmat->maxsize), 1);
508			pages = MIN(maxpages - bz->total_bpages, pages);
509			pages = MAX(pages, 1);
510			if (alloc_bounce_pages(dmat, pages) < pages)
511				error = ENOMEM;
512
513			if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) {
514				if (error == 0)
515					dmat->flags |= BUS_DMA_MIN_ALLOC_COMP;
516			} else {
517				error = 0;
518			}
519		}
520		bz->map_count++;
521	}
522
523	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
524	    __func__, dmat, dmat->flags, error);
525
526	return (0);
527}
528
529/*
530 * Destroy a handle for mapping from kva/uva/physical
531 * address space into bus device space.
532 */
533int
534bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
535{
536
537	_busdma_free_dmamap(map);
538	if (STAILQ_FIRST(&map->bpages) != NULL) {
539		CTR3(KTR_BUSDMA, "%s: tag %p error %d",
540		    __func__, dmat, EBUSY);
541		return (EBUSY);
542	}
543	if (dmat->bounce_zone)
544		dmat->bounce_zone->map_count--;
545        dmat->map_count--;
546	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
547        return (0);
548}
549
/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
555int
556bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
557                 bus_dmamap_t *mapp)
558{
559	bus_dmamap_t newmap = NULL;
560
561	int mflags;
562
563	if (flags & BUS_DMA_NOWAIT)
564		mflags = M_NOWAIT;
565	else
566		mflags = M_WAITOK;
567	if (flags & BUS_DMA_ZERO)
568		mflags |= M_ZERO;
569
570	newmap = _busdma_alloc_dmamap();
571	if (newmap == NULL) {
572		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
573		    __func__, dmat, dmat->flags, ENOMEM);
574		return (ENOMEM);
575	}
576	dmat->map_count++;
577	*mapp = newmap;
578	newmap->dmat = dmat;
579
580	/*
581	 * If all the memory is coherent with DMA then we don't need to
582	 * do anything special for a coherent mapping request.
583	 */
584	if (dmat->flags & BUS_DMA_COHERENT)
585	    flags &= ~BUS_DMA_COHERENT;
586
587	/*
588	 * Allocate uncacheable memory if all else fails.
589	 */
590	if (flags & BUS_DMA_COHERENT)
591	    newmap->flags |= DMAMAP_UNCACHEABLE;
592
593        if (dmat->maxsize <= PAGE_SIZE &&
594	   (dmat->alignment < dmat->maxsize) &&
595	   !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr) &&
596	   !(newmap->flags & DMAMAP_UNCACHEABLE)) {
597                *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
598		newmap->flags |= DMAMAP_MALLOCUSED;
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		vm_paddr_t maxphys;

		if ((uint32_t)dmat->lowaddr >= MIPS_KSEG0_LARGEST_PHYS) {
			/*
			 * Clamp the upper bound to the largest physical
			 * address that KSEG0/KSEG1 can map directly.
			 *
			 * Note in the else case I just put in what was
			 * already being passed in dmat->lowaddr.  I am not
			 * sure how this would have worked.  Since lowaddr is
			 * in the max address position, I would have thought
			 * that the caller would have wanted dmat->highaddr.
			 * That is presuming they are asking for physical
			 * addresses, which is what contigmalloc takes. - RRS
			 */
			maxphys = MIPS_KSEG0_LARGEST_PHYS - 1;
		} else {
			maxphys = dmat->lowaddr;
		}
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, maxphys, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
623        if (*vaddr == NULL) {
624		if (newmap != NULL) {
625			_busdma_free_dmamap(newmap);
626			dmat->map_count--;
627		}
628		*mapp = NULL;
629                return (ENOMEM);
630	}
631
632	if (newmap->flags & DMAMAP_UNCACHEABLE) {
633		void *tmpaddr = (void *)*vaddr;
634
635		if (tmpaddr) {
636			tmpaddr = (void *)MIPS_PHYS_TO_KSEG1(vtophys(tmpaddr));
637			newmap->origbuffer = *vaddr;
638			newmap->allocbuffer = tmpaddr;
639			mips_dcache_wbinv_range((vm_offset_t)*vaddr,
640			    dmat->maxsize);
641			*vaddr = tmpaddr;
642		} else
643			newmap->origbuffer = newmap->allocbuffer = NULL;
644	} else
645		newmap->origbuffer = newmap->allocbuffer = NULL;
646
647        return (0);
648}
649
/*
 * Free a piece of memory and its allocated dmamap, both of which were
 * allocated via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
654void
655bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
656{
	if (map->allocbuffer) {
		KASSERT(map->allocbuffer == vaddr,
		    ("Trying to free the wrong DMA buffer"));
		vaddr = map->origbuffer;
	}
662
663        if (map->flags & DMAMAP_MALLOCUSED)
664		free(vaddr, M_DEVBUF);
665        else
666		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
667
668	dmat->map_count--;
669	_busdma_free_dmamap(map);
670	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
671}
672
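/*
 * Count the bounce pages needed to load the given buffer and reserve them.
 * Returns 0 on success, ENOMEM if a BUS_DMA_NOWAIT reservation cannot be
 * satisfied, or EINPROGRESS if the map was queued to wait for bounce pages.
 */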
673static int
674_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap,
675    void *buf, bus_size_t buflen, int flags)
676{
677	vm_offset_t vaddr;
678	vm_offset_t vendaddr;
679	bus_addr_t paddr;
680
681	if ((map->pagesneeded == 0)) {
682		CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d",
683		    dmat->lowaddr, dmat->boundary, dmat->alignment);
684		CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d",
685		    map, map->pagesneeded);
686		/*
687		 * Count the number of bounce pages
688		 * needed in order to complete this transfer
689		 */
690		vaddr = trunc_page((vm_offset_t)buf);
691		vendaddr = (vm_offset_t)buf + buflen;
692
693		while (vaddr < vendaddr) {
694			KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
695			paddr = pmap_kextract(vaddr);
696			if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
697			    run_filter(dmat, paddr) != 0)
698				map->pagesneeded++;
699			vaddr += PAGE_SIZE;
700		}
701		CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded);
702	}
703
704	/* Reserve Necessary Bounce Pages */
705	if (map->pagesneeded != 0) {
706		mtx_lock(&bounce_lock);
707		if (flags & BUS_DMA_NOWAIT) {
708			if (reserve_bounce_pages(dmat, map, 0) != 0) {
709				mtx_unlock(&bounce_lock);
710				return (ENOMEM);
711			}
712		} else {
713			if (reserve_bounce_pages(dmat, map, 1) != 0) {
714				/* Queue us for resources */
715				STAILQ_INSERT_TAIL(&bounce_map_waitinglist,
716				    map, links);
717				mtx_unlock(&bounce_lock);
718				return (EINPROGRESS);
719			}
720		}
721		mtx_unlock(&bounce_lock);
722	}
723
724	return (0);
725}
726
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entry and the ending segment on exit.
 */
733static __inline int
734bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
735    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
736    int flags, vm_offset_t *lastaddrp, int *segp)
737{
738	bus_size_t sgsize;
739	bus_addr_t curaddr, lastaddr, baddr, bmask;
740	vm_offset_t vaddr = (vm_offset_t)buf;
741	int seg;
742	int error = 0;
743
744	lastaddr = *lastaddrp;
745	bmask = ~(dmat->boundary - 1);
746
747	if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) {
748		error = _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen,
749		    flags);
750		if (error)
751			return (error);
752	}
753	CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
754	    "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);
755
756	for (seg = *segp; buflen > 0 ; ) {
757		/*
758		 * Get the physical address for this segment.
759		 *
760		 * XXX Don't support checking for coherent mappings
761		 * XXX in user address space.
762		 */
763		KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
764		curaddr = pmap_kextract(vaddr);
765
766		/*
767		 * Compute the segment size, and adjust counts.
768		 */
769		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
770		if (sgsize > dmat->maxsegsz)
771			sgsize = dmat->maxsegsz;
772		if (buflen < sgsize)
773			sgsize = buflen;
774
775		/*
776		 * Make sure we don't cross any boundaries.
777		 */
778		if (dmat->boundary > 0) {
779			baddr = (curaddr + dmat->boundary) & bmask;
780			if (sgsize > (baddr - curaddr))
781				sgsize = (baddr - curaddr);
782		}
783		if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
784		    map->pagesneeded != 0 && run_filter(dmat, curaddr)) {
785			curaddr = add_bounce_page(dmat, map, vaddr, sgsize);
786		}
787
788		/*
789		 * Insert chunk into a segment, coalescing with
790		 * the previous segment if possible.
791		 */
792		if (seg >= 0 && curaddr == lastaddr &&
793		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
794		    (dmat->boundary == 0 ||
795		     (segs[seg].ds_addr & bmask) ==
796		     (curaddr & bmask))) {
797			segs[seg].ds_len += sgsize;
798			goto segdone;
799		} else {
800			if (++seg >= dmat->nsegments)
801				break;
802			segs[seg].ds_addr = curaddr;
803			segs[seg].ds_len = sgsize;
804		}
805		if (error)
806			break;
807segdone:
808		lastaddr = curaddr + sgsize;
809		vaddr += sgsize;
810		buflen -= sgsize;
811	}
812
813	*segp = seg;
814	*lastaddrp = lastaddr;
815
816	/*
817	 * Did we fit?
818	 */
819	if (buflen != 0)
820		error = EFBIG; /* XXX better return value here? */
821	return (error);
822}
823
824/*
825 * Map the buffer buf into bus space using the dmamap map.
826 */
827int
828bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
829                bus_size_t buflen, bus_dmamap_callback_t *callback,
830                void *callback_arg, int flags)
831{
832     	vm_offset_t	lastaddr = 0;
833	int		error, nsegs = -1;
834#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
835	bus_dma_segment_t dm_segments[dmat->nsegments];
836#else
837	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
838#endif
839
840	KASSERT(dmat != NULL, ("dmatag is NULL"));
841	KASSERT(map != NULL, ("dmamap is NULL"));
842	map->callback = callback;
843	map->callback_arg = callback_arg;
844	map->flags &= ~DMAMAP_TYPE_MASK;
845	map->flags |= DMAMAP_LINEAR;
846	map->buffer = buf;
847	map->len = buflen;
848	error = bus_dmamap_load_buffer(dmat,
849	    dm_segments, map, buf, buflen, kernel_pmap,
850	    flags, &lastaddr, &nsegs);
851	if (error == EINPROGRESS)
852		return (error);
853	if (error)
854		(*callback)(callback_arg, NULL, 0, error);
855	else
856		(*callback)(callback_arg, dm_segments, nsegs + 1, error);
857
858	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
859	    __func__, dmat, dmat->flags, nsegs + 1, error);
860
861	return (error);
862}
863
864/*
865 * Like bus_dmamap_load(), but for mbufs.
866 */
867int
868bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
869		     bus_dmamap_callback2_t *callback, void *callback_arg,
870		     int flags)
871{
872#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
873	bus_dma_segment_t dm_segments[dmat->nsegments];
874#else
875	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
876#endif
877	int nsegs = -1, error = 0;
878
879	M_ASSERTPKTHDR(m0);
880
881	map->flags &= ~DMAMAP_TYPE_MASK;
882	map->flags |= DMAMAP_MBUF;
883	map->buffer = m0;
884	map->len = 0;
885	if (m0->m_pkthdr.len <= dmat->maxsize) {
886		vm_offset_t lastaddr = 0;
887		struct mbuf *m;
888
889		for (m = m0; m != NULL && error == 0; m = m->m_next) {
890			if (m->m_len > 0) {
891				error = bus_dmamap_load_buffer(dmat,
892				    dm_segments, map, m->m_data, m->m_len,
893				    kernel_pmap, flags, &lastaddr, &nsegs);
894				map->len += m->m_len;
895			}
896		}
897	} else {
898		error = EINVAL;
899	}
900
901	if (error) {
902		/*
903		 * force "no valid mappings" on error in callback.
904		 */
905		(*callback)(callback_arg, dm_segments, 0, 0, error);
906	} else {
907		(*callback)(callback_arg, dm_segments, nsegs + 1,
908		    m0->m_pkthdr.len, error);
909	}
910	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
911	    __func__, dmat, dmat->flags, error, nsegs + 1);
912
913	return (error);
914}
915
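/*
 * Like bus_dmamap_load_mbuf(), but fills the caller-supplied segment array
 * directly instead of invoking a callback; the load is always attempted
 * with BUS_DMA_NOWAIT and is never deferred.
 */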
916int
917bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
918			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
919			int flags)
920{
921	int error = 0;
922	M_ASSERTPKTHDR(m0);
923
924	flags |= BUS_DMA_NOWAIT;
925	*nsegs = -1;
926	map->flags &= ~DMAMAP_TYPE_MASK;
927	map->flags |= DMAMAP_MBUF;
928	map->buffer = m0;
929	map->len = 0;
930	if (m0->m_pkthdr.len <= dmat->maxsize) {
931		vm_offset_t lastaddr = 0;
932		struct mbuf *m;
933
934		for (m = m0; m != NULL && error == 0; m = m->m_next) {
935			if (m->m_len > 0) {
936				error = bus_dmamap_load_buffer(dmat, segs, map,
937						m->m_data, m->m_len,
938						kernel_pmap, flags, &lastaddr,
939						nsegs);
940				map->len += m->m_len;
941			}
942		}
943	} else {
944		error = EINVAL;
945	}
946
947	/* XXX FIXME: Having to increment nsegs is really annoying */
948	++*nsegs;
949	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
950	    __func__, dmat, dmat->flags, error, *nsegs);
951	return (error);
952}
953
954/*
955 * Like bus_dmamap_load(), but for uios.
956 */
957int
958bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
959    bus_dmamap_callback2_t *callback, void *callback_arg,
960    int flags)
961{
962	vm_offset_t lastaddr = 0;
963#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
964	bus_dma_segment_t dm_segments[dmat->nsegments];
965#else
966	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
967#endif
968	int nsegs, i, error;
969	bus_size_t resid;
970	struct iovec *iov;
971	struct pmap *pmap;
972
973	resid = uio->uio_resid;
974	iov = uio->uio_iov;
975	map->flags &= ~DMAMAP_TYPE_MASK;
976	map->flags |= DMAMAP_UIO;
977	map->buffer = uio;
978	map->len = 0;
979
980	if (uio->uio_segflg == UIO_USERSPACE) {
981		KASSERT(uio->uio_td != NULL,
982		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
983		/* XXX: pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace); */
984		panic("can't do it yet");
985	} else
986		pmap = kernel_pmap;
987
988	error = 0;
989	nsegs = -1;
990	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
991		/*
992		 * Now at the first iovec to load.  Load each iovec
993		 * until we have exhausted the residual count.
994		 */
995		bus_size_t minlen =
996		    resid < iov[i].iov_len ? resid : iov[i].iov_len;
997		caddr_t addr = (caddr_t) iov[i].iov_base;
998
999		if (minlen > 0) {
1000			error = bus_dmamap_load_buffer(dmat, dm_segments, map,
1001			    addr, minlen, pmap, flags, &lastaddr, &nsegs);
1002
1003			map->len += minlen;
1004			resid -= minlen;
1005		}
1006	}
1007
1008	if (error) {
1009		/*
1010		 * force "no valid mappings" on error in callback.
1011		 */
1012		(*callback)(callback_arg, dm_segments, 0, 0, error);
1013	} else {
1014		(*callback)(callback_arg, dm_segments, nsegs+1,
1015		    uio->uio_resid, error);
1016	}
1017
1018	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
1019	    __func__, dmat, dmat->flags, error, nsegs + 1);
1020	return (error);
1021}
1022
1023/*
1024 * Release the mapping held by map.
1025 */
1026void
1027_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
1028{
1029	struct bounce_page *bpage;
1030
1031	map->flags &= ~DMAMAP_TYPE_MASK;
1032	while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
1033		STAILQ_REMOVE_HEAD(&map->bpages, links);
1034		free_bounce_page(dmat, bpage);
1035	}
1036	return;
1037}
1038
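/*
 * Perform the cache maintenance required by a sync operation on an ordinary
 * (non-bounced) buffer, which may not be cache line aligned.
 */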
1039static void
1040bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
1041{
1042	char tmp_cl[mips_pdcache_linesize], tmp_clend[mips_pdcache_linesize];
1043	vm_offset_t buf_cl, buf_clend;
1044	vm_size_t size_cl, size_clend;
1045	int cache_linesize_mask = mips_pdcache_linesize - 1;
1046
	/*
	 * dcache invalidation operates on cache line aligned addresses
	 * and could modify areas of memory that share the same cache line
	 * at the beginning and the end of the buffer.  In order to
	 * prevent data loss we save these chunks in temporary buffers
	 * before invalidation and restore them after it.
	 */
1054	buf_cl = (vm_offset_t)buf  & ~cache_linesize_mask;
1055	size_cl = (vm_offset_t)buf  & cache_linesize_mask;
1056	buf_clend = (vm_offset_t)buf + len;
1057	size_clend = (mips_pdcache_linesize -
1058	    (buf_clend & cache_linesize_mask)) & cache_linesize_mask;
1059
1060	switch (op) {
1061	case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
1062	case BUS_DMASYNC_POSTREAD:
1063
1064		/*
1065		 * Save buffers that might be modified by invalidation
1066		 */
1067		if (size_cl)
1068			memcpy (tmp_cl, (void*)buf_cl, size_cl);
1069		if (size_clend)
1070			memcpy (tmp_clend, (void*)buf_clend, size_clend);
1071		mips_dcache_inv_range((vm_offset_t)buf, len);
1072		/*
1073		 * Restore them
1074		 */
1075		if (size_cl)
1076			memcpy ((void*)buf_cl, tmp_cl, size_cl);
1077		if (size_clend)
1078			memcpy ((void*)buf_clend, tmp_clend, size_clend);
1079		/*
1080		 * Copies above have brought corresponding memory
1081		 * cache lines back into dirty state. Write them back
1082		 * out and invalidate affected cache lines again if
1083		 * necessary.
1084		 */
1085		if (size_cl)
1086			mips_dcache_wbinv_range((vm_offset_t)buf_cl, size_cl);
1087		if (size_clend && (size_cl == 0 ||
1088                    buf_clend - buf_cl > mips_pdcache_linesize))
1089			mips_dcache_wbinv_range((vm_offset_t)buf_clend,
1090			   size_clend);
1091		break;
1092
1093	case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
1094		mips_dcache_wbinv_range((vm_offset_t)buf_cl, len);
1095		break;
1096
1097	case BUS_DMASYNC_PREREAD:
1098		/*
1099		 * Save buffers that might be modified by invalidation
1100		 */
1101		if (size_cl)
1102			memcpy (tmp_cl, (void *)buf_cl, size_cl);
1103		if (size_clend)
1104			memcpy (tmp_clend, (void *)buf_clend, size_clend);
1105		mips_dcache_inv_range((vm_offset_t)buf, len);
1106		/*
1107		 * Restore them
1108		 */
1109		if (size_cl)
1110			memcpy ((void *)buf_cl, tmp_cl, size_cl);
1111		if (size_clend)
1112			memcpy ((void *)buf_clend, tmp_clend, size_clend);
1113		/*
1114		 * Copies above have brought corresponding memory
1115		 * cache lines back into dirty state. Write them back
1116		 * out and invalidate affected cache lines again if
1117		 * necessary.
1118		 */
1119		if (size_cl)
1120			mips_dcache_wbinv_range((vm_offset_t)buf_cl, size_cl);
1121		if (size_clend && (size_cl == 0 ||
1122                    buf_clend - buf_cl > mips_pdcache_linesize))
1123			mips_dcache_wbinv_range((vm_offset_t)buf_clend,
1124			   size_clend);
1125		break;
1126
1127	case BUS_DMASYNC_PREWRITE:
1128		mips_dcache_wb_range((vm_offset_t)buf, len);
1129		break;
1130	}
1131}
1132
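/*
 * Copy data between the client buffer and the map's bounce pages, using the
 * uncached KSEG1 alias when one is available and performing the necessary
 * cache writeback/invalidation when it is not.
 */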
1133static void
1134_bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
1135{
1136	struct bounce_page *bpage;
1137
1138	STAILQ_FOREACH(bpage, &map->bpages, links) {
1139		if (op & BUS_DMASYNC_PREWRITE) {
1140			bcopy((void *)bpage->datavaddr,
1141			    (void *)(bpage->vaddr_nocache != 0 ?
1142				     bpage->vaddr_nocache : bpage->vaddr),
1143			    bpage->datacount);
1144			if (bpage->vaddr_nocache == 0) {
1145				mips_dcache_wb_range(bpage->vaddr,
1146				    bpage->datacount);
1147			}
1148			dmat->bounce_zone->total_bounced++;
1149		}
1150		if (op & BUS_DMASYNC_POSTREAD) {
1151			if (bpage->vaddr_nocache == 0) {
1152				mips_dcache_inv_range(bpage->vaddr,
1153				    bpage->datacount);
1154			}
1155			bcopy((void *)(bpage->vaddr_nocache != 0 ?
1156	       		    bpage->vaddr_nocache : bpage->vaddr),
1157			    (void *)bpage->datavaddr, bpage->datacount);
1158			dmat->bounce_zone->total_bounced++;
1159		}
1160	}
1161}
1162
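/*
 * Return 1 if the buffer [buf, buf + len) is entirely covered by one of the
 * map's bounce pages, in which case no cache maintenance on the original
 * buffer is needed.
 */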
1163static __inline int
1164_bus_dma_buf_is_in_bp(bus_dmamap_t map, void *buf, int len)
1165{
1166	struct bounce_page *bpage;
1167
1168	STAILQ_FOREACH(bpage, &map->bpages, links) {
1169		if ((vm_offset_t)buf >= bpage->datavaddr &&
1170		    (vm_offset_t)buf + len <= bpage->datavaddr +
1171		    bpage->datacount)
1172			return (1);
1173	}
1174	return (0);
1175
1176}
1177
1178void
1179_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
1180{
1181	struct mbuf *m;
1182	struct uio *uio;
1183	int resid;
1184	struct iovec *iov;
1185
1186	if (op == BUS_DMASYNC_POSTWRITE)
1187		return;
1188	if (STAILQ_FIRST(&map->bpages))
1189		_bus_dmamap_sync_bp(dmat, map, op);
1190
1191	if (dmat->flags & BUS_DMA_COHERENT)
1192		return;
1193
1194	if (map->flags & DMAMAP_UNCACHEABLE)
1195		return;
1196
1197	CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
1198	switch(map->flags & DMAMAP_TYPE_MASK) {
1199	case DMAMAP_LINEAR:
1200		if (!(_bus_dma_buf_is_in_bp(map, map->buffer, map->len)))
1201			bus_dmamap_sync_buf(map->buffer, map->len, op);
1202		break;
1203	case DMAMAP_MBUF:
1204		m = map->buffer;
1205		while (m) {
1206			if (m->m_len > 0 &&
1207			    !(_bus_dma_buf_is_in_bp(map, m->m_data, m->m_len)))
1208				bus_dmamap_sync_buf(m->m_data, m->m_len, op);
1209			m = m->m_next;
1210		}
1211		break;
1212	case DMAMAP_UIO:
1213		uio = map->buffer;
1214		iov = uio->uio_iov;
1215		resid = uio->uio_resid;
1216		for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
1217			bus_size_t minlen = resid < iov[i].iov_len ? resid :
1218			    iov[i].iov_len;
1219			if (minlen > 0) {
1220				if (!_bus_dma_buf_is_in_bp(map, iov[i].iov_base,
1221				    minlen))
1222					bus_dmamap_sync_buf(iov[i].iov_base,
1223					    minlen, op);
1224				resid -= minlen;
1225			}
1226		}
1227		break;
1228	default:
1229		break;
1230	}
1231}
1232
1233static void
1234init_bounce_pages(void *dummy __unused)
1235{
1236
1237	total_bpages = 0;
1238	STAILQ_INIT(&bounce_zone_list);
1239	STAILQ_INIT(&bounce_map_waitinglist);
1240	STAILQ_INIT(&bounce_map_callbacklist);
1241	mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF);
1242}
1243SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL);
1244
1245static struct sysctl_ctx_list *
1246busdma_sysctl_tree(struct bounce_zone *bz)
1247{
1248	return (&bz->sysctl_tree);
1249}
1250
1251static struct sysctl_oid *
1252busdma_sysctl_tree_top(struct bounce_zone *bz)
1253{
1254	return (bz->sysctl_tree_top);
1255}
1256
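/*
 * Find an existing bounce zone compatible with the tag's lowaddr and
 * alignment, or create (and register with sysctl) a new one.
 */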
1257static int
1258alloc_bounce_zone(bus_dma_tag_t dmat)
1259{
1260	struct bounce_zone *bz;
1261
1262	/* Check to see if we already have a suitable zone */
1263	STAILQ_FOREACH(bz, &bounce_zone_list, links) {
1264		if ((dmat->alignment <= bz->alignment)
1265		 && (dmat->lowaddr >= bz->lowaddr)) {
1266			dmat->bounce_zone = bz;
1267			return (0);
1268		}
1269	}
1270
1271	if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF,
1272	    M_NOWAIT | M_ZERO)) == NULL)
1273		return (ENOMEM);
1274
1275	STAILQ_INIT(&bz->bounce_page_list);
1276	bz->free_bpages = 0;
1277	bz->reserved_bpages = 0;
1278	bz->active_bpages = 0;
1279	bz->lowaddr = dmat->lowaddr;
1280	bz->alignment = MAX(dmat->alignment, PAGE_SIZE);
1281	bz->map_count = 0;
1282	snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount);
1283	busdma_zonecount++;
1284	snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr);
1285	STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links);
1286	dmat->bounce_zone = bz;
1287
1288	sysctl_ctx_init(&bz->sysctl_tree);
1289	bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree,
1290	    SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid,
1291	    CTLFLAG_RD, 0, "");
1292	if (bz->sysctl_tree_top == NULL) {
1293		sysctl_ctx_free(&bz->sysctl_tree);
1294		return (0);	/* XXX error code? */
1295	}
1296
1297	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1298	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1299	    "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0,
1300	    "Total bounce pages");
1301	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1302	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1303	    "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0,
1304	    "Free bounce pages");
1305	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1306	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1307	    "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0,
1308	    "Reserved bounce pages");
1309	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1310	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1311	    "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0,
1312	    "Active bounce pages");
1313	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1314	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1315	    "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0,
1316	    "Total bounce requests");
1317	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1318	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1319	    "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0,
1320	    "Total bounce requests that were deferred");
1321	SYSCTL_ADD_STRING(busdma_sysctl_tree(bz),
1322	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1323	    "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, "");
1324	SYSCTL_ADD_INT(busdma_sysctl_tree(bz),
1325	    SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO,
1326	    "alignment", CTLFLAG_RD, &bz->alignment, 0, "");
1327
1328	return (0);
1329}
1330
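/*
 * Add up to numpages bounce pages to the tag's bounce zone, allocating each
 * page with contigmalloc() below the zone's lowaddr limit.  Returns the
 * number of pages actually added.
 */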
1331static int
1332alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages)
1333{
1334	struct bounce_zone *bz;
1335	int count;
1336
1337	bz = dmat->bounce_zone;
1338	count = 0;
1339	while (numpages > 0) {
1340		struct bounce_page *bpage;
1341
1342		bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF,
1343						     M_NOWAIT | M_ZERO);
1344
1345		if (bpage == NULL)
1346			break;
1347		bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF,
1348							 M_NOWAIT, 0ul,
1349							 bz->lowaddr,
1350							 PAGE_SIZE,
1351							 0);
1352		if (bpage->vaddr == 0) {
1353			free(bpage, M_DEVBUF);
1354			break;
1355		}
1356		bpage->busaddr = pmap_kextract(bpage->vaddr);
1357		bpage->vaddr_nocache =
1358		    (vm_offset_t)MIPS_PHYS_TO_KSEG1(bpage->busaddr);
1359		mtx_lock(&bounce_lock);
1360		STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links);
1361		total_bpages++;
1362		bz->total_bpages++;
1363		bz->free_bpages++;
1364		mtx_unlock(&bounce_lock);
1365		count++;
1366		numpages--;
1367	}
1368	return (count);
1369}
1370
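/*
 * Reserve free bounce pages for a map and return the number of pages still
 * missing.  With commit == 0 nothing is reserved unless the entire request
 * can be satisfied.
 */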
1371static int
1372reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit)
1373{
1374	struct bounce_zone *bz;
1375	int pages;
1376
1377	mtx_assert(&bounce_lock, MA_OWNED);
1378	bz = dmat->bounce_zone;
1379	pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved);
1380	if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages))
1381		return (map->pagesneeded - (map->pagesreserved + pages));
1382	bz->free_bpages -= pages;
1383	bz->reserved_bpages += pages;
1384	map->pagesreserved += pages;
1385	pages = map->pagesneeded - map->pagesreserved;
1386
1387	return (pages);
1388}
1389
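/*
 * Take a bounce page off the zone's free list, bind it to the given client
 * address and size, and queue it on the map.  Returns the bus address the
 * DMA transfer should use for this chunk.
 */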
1390static bus_addr_t
1391add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
1392		bus_size_t size)
1393{
1394	struct bounce_zone *bz;
1395	struct bounce_page *bpage;
1396
1397	KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
1398	KASSERT(map != NULL, ("add_bounce_page: bad map %p", map));
1399
1400	bz = dmat->bounce_zone;
1401	if (map->pagesneeded == 0)
1402		panic("add_bounce_page: map doesn't need any pages");
1403	map->pagesneeded--;
1404
1405	if (map->pagesreserved == 0)
1406		panic("add_bounce_page: map doesn't need any pages");
1407	map->pagesreserved--;
1408
1409	mtx_lock(&bounce_lock);
1410	bpage = STAILQ_FIRST(&bz->bounce_page_list);
1411	if (bpage == NULL)
1412		panic("add_bounce_page: free page list is empty");
1413
1414	STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links);
1415	bz->reserved_bpages--;
1416	bz->active_bpages++;
1417	mtx_unlock(&bounce_lock);
1418
1419	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
1420		/* Page offset needs to be preserved. */
1421		bpage->vaddr |= vaddr & PAGE_MASK;
1422		bpage->busaddr |= vaddr & PAGE_MASK;
1423	}
1424	bpage->datavaddr = vaddr;
1425	bpage->datacount = size;
1426	STAILQ_INSERT_TAIL(&(map->bpages), bpage, links);
1427	return (bpage->busaddr);
1428}
1429
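/*
 * Return a bounce page to its zone's free list and, if maps are waiting for
 * pages, move the first satisfiable map to the callback list and schedule
 * busdma_swi() to retry its deferred load.
 */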
1430static void
1431free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage)
1432{
1433	struct bus_dmamap *map;
1434	struct bounce_zone *bz;
1435
1436	bz = dmat->bounce_zone;
1437	bpage->datavaddr = 0;
1438	bpage->datacount = 0;
1439	if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) {
1440		/*
1441		 * Reset the bounce page to start at offset 0.  Other uses
1442		 * of this bounce page may need to store a full page of
1443		 * data and/or assume it starts on a page boundary.
1444		 */
1445		bpage->vaddr &= ~PAGE_MASK;
1446		bpage->busaddr &= ~PAGE_MASK;
1447	}
1448
1449	mtx_lock(&bounce_lock);
1450	STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links);
1451	bz->free_bpages++;
1452	bz->active_bpages--;
1453	if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) {
1454		if (reserve_bounce_pages(map->dmat, map, 1) == 0) {
1455			STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links);
1456			STAILQ_INSERT_TAIL(&bounce_map_callbacklist,
1457					   map, links);
1458			busdma_swi_pending = 1;
1459			bz->total_deferred++;
1460			swi_sched(vm_ih, 0);
1461		}
1462	}
1463	mtx_unlock(&bounce_lock);
1464}
1465
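/*
 * Software interrupt handler for deferred bus_dmamap_load() requests; it is
 * scheduled from free_bounce_page() once enough bounce pages are available.
 */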
1466void
1467busdma_swi(void)
1468{
1469	bus_dma_tag_t dmat;
1470	struct bus_dmamap *map;
1471
1472	mtx_lock(&bounce_lock);
1473	while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) {
1474		STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links);
1475		mtx_unlock(&bounce_lock);
1476		dmat = map->dmat;
1477		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK);
1478		bus_dmamap_load(map->dmat, map, map->buffer, map->len,
1479		    map->callback, map->callback_arg, /*flags*/0);
1480		(dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK);
1481		mtx_lock(&bounce_lock);
1482	}
1483	mtx_unlock(&bounce_lock);
1484}
1485