busdma_machdep-v4.c revision 156191
/*-
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 2002 Peter Grehan
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *   From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep.c 156191 2006-03-01 23:04:25Z cognet $");

/*
 * ARM bus dma support routines
 */

#define _ARM32_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/ktr.h>
#include <sys/kernel.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>

struct bus_dma_tag {
	bus_dma_tag_t		parent;
	bus_size_t		alignment;
	bus_size_t		boundary;
	bus_addr_t		lowaddr;
	bus_addr_t		highaddr;
	bus_dma_filter_t	*filter;
	void			*filterarg;
	bus_size_t		maxsize;
	u_int			nsegments;
	bus_size_t		maxsegsz;
	int			flags;
	int			ref_count;
	int			map_count;
	bus_dma_lock_t		*lockfunc;
	void			*lockfuncarg;
	/*
	 * DMA range for this tag.  If the page doesn't fall within
	 * one of these ranges, an error is returned.  The caller
	 * may then decide what to do with the transfer.  If the
	 * range pointer is NULL, it is ignored.
	 */
	struct arm32_dma_range	*ranges;
	int			_nranges;
};

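/*
 * Map flags.  DMAMAP_COHERENT is set when every page backing the current
 * load is known to be mapped non-cacheable, in which case _bus_dmamap_sync()
 * can skip cache maintenance entirely.  DMAMAP_ALLOCATED marks maps that
 * came from malloc(9) rather than from the static map_pool below, so
 * _busdma_free_dmamap() knows where to return them.
 */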
#define DMAMAP_LINEAR		0x1
#define DMAMAP_MBUF		0x2
#define DMAMAP_UIO		0x4
#define DMAMAP_ALLOCATED	0x10
#define DMAMAP_TYPE_MASK	(DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)
#define DMAMAP_COHERENT		0x8
struct bus_dmamap {
	bus_dma_tag_t	dmat;
	int		flags;
	void		*buffer;
	void		*origbuffer;
	void		*allocbuffer;
	TAILQ_ENTRY(bus_dmamap)	freelist;
	int		len;
};

static TAILQ_HEAD(,bus_dmamap) dmamap_freelist =
	TAILQ_HEAD_INITIALIZER(dmamap_freelist);

#define BUSDMA_STATIC_MAPS	500
static struct bus_dmamap map_pool[BUSDMA_STATIC_MAPS];

static struct mtx busdma_mtx;

MTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF);

static void
arm_dmamap_freelist_init(void *dummy)
{
	int i;

	for (i = 0; i < BUSDMA_STATIC_MAPS; i++)
		TAILQ_INSERT_HEAD(&dmamap_freelist, &map_pool[i], freelist);
}

SYSINIT(busdma, SI_SUB_VM, SI_ORDER_ANY, arm_dmamap_freelist_init, NULL);

static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp);

/*
 * Check to see if the specified page is in an allowed DMA range.
 */
static __inline struct arm32_dma_range *
_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	int i;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (curaddr >= dr->dr_sysbase &&
		    round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
			return (dr);
	}

	return (NULL);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should pass &Giant as dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are never meant to be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
	panic("driver error: busdma dflt_lock called");
#else
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}

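/*
 * Allocate a dmamap, preferring the statically allocated map_pool seeded at
 * SYSINIT time.  The pool lets map creation succeed without calling
 * malloc(9); only when the pool is exhausted do we fall back to malloc with
 * M_NOWAIT, marking the map DMAMAP_ALLOCATED so it is later returned to
 * malloc rather than to the freelist.
 */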
static __inline bus_dmamap_t
_busdma_alloc_dmamap(void)
{
	bus_dmamap_t map;

	mtx_lock(&busdma_mtx);
	map = TAILQ_FIRST(&dmamap_freelist);
	if (map)
		TAILQ_REMOVE(&dmamap_freelist, map, freelist);
	mtx_unlock(&busdma_mtx);
	if (!map) {
		map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT);
		if (map)
			map->flags = DMAMAP_ALLOCATED;
	} else
		map->flags = 0;
	return (map);
}

static __inline void
_busdma_free_dmamap(bus_dmamap_t map)
{
	if (map->flags & DMAMAP_ALLOCATED)
		free(map, M_DEVBUF);
	else {
		mtx_lock(&busdma_mtx);
		TAILQ_INSERT_HEAD(&dmamap_freelist, map, freelist);
		mtx_unlock(&busdma_mtx);
	}
}

/*
 * Allocate a device specific dma_tag.
 */
#define SEG_NB 1024

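/*
 * Typical driver usage, as a hedged sketch (the softc fields and the
 * MCLBYTES-sized limits below are hypothetical, not taken from any
 * particular driver):
 *
 *	error = bus_dma_tag_create(NULL, 1, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
 *	    NULL, NULL, MCLBYTES, 1, MCLBYTES, 0,
 *	    busdma_lock_mutex, &sc->sc_mtx, &sc->sc_dmat);
 *	if (error == 0)
 *		error = bus_dmamap_create(sc->sc_dmat, 0, &sc->sc_dmamap);
 */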
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	newtag->ranges = bus_dma_get_range();
	newtag->_nranges = bus_dma_get_range_nb();
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	/*
	 * Take into account any restrictions imposed by our parent tag
	 */
	if (parent != NULL) {
		newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = max(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = min(parent->boundary,
			    newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	*dmat = newtag;
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);

	return (error);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
#ifdef KTR
	bus_dma_tag_t dmat_copy = dmat;
#endif

	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy);

	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap;
#ifdef KTR
	int error = 0;
#endif

	newmap = _busdma_alloc_dmamap();
	if (newmap == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
		return (ENOMEM);
	}
	*mapp = newmap;
	newmap->dmat = dmat;
	dmat->map_count++;

	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);

	return (0);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	_busdma_free_dmamap(map);
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
                 bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap = NULL;
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	newmap = _busdma_alloc_dmamap();
	if (newmap == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	}
	dmat->map_count++;
	*mapp = newmap;
	newmap->dmat = dmat;

	if (dmat->maxsize <= PAGE_SIZE) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL) {
		if (newmap != NULL) {
			_busdma_free_dmamap(newmap);
			dmat->map_count--;
		}
		*mapp = NULL;
		return (ENOMEM);
	}
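	/*
	 * For BUS_DMA_COHERENT the buffer is re-mapped through an uncached
	 * kernel VA via arm_remap_nocache().  The original cacheable mapping
	 * is written back and invalidated first so the CPU and the device see
	 * a consistent view; the original address is remembered in origbuffer
	 * so bus_dmamem_free() can undo the remapping.
	 */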
	if (flags & BUS_DMA_COHERENT) {
		void *tmpaddr = arm_remap_nocache(
		    (void *)((vm_offset_t)*vaddr &~ PAGE_MASK),
		    dmat->maxsize + ((vm_offset_t)*vaddr & PAGE_MASK));

		if (tmpaddr) {
			tmpaddr = (void *)((vm_offset_t)(tmpaddr) +
			    ((vm_offset_t)*vaddr & PAGE_MASK));
			newmap->origbuffer = *vaddr;
			newmap->allocbuffer = tmpaddr;
			cpu_idcache_wbinv_range((vm_offset_t)*vaddr,
			    dmat->maxsize);
			*vaddr = tmpaddr;
		} else
			newmap->origbuffer = newmap->allocbuffer = NULL;
	} else
		newmap->origbuffer = newmap->allocbuffer = NULL;
	return (0);
}

/*
 * Free a piece of memory and its associated dmamap, both of which were
 * allocated via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	if (map->allocbuffer) {
		KASSERT(map->allocbuffer == vaddr,
		    ("Trying to free the wrong DMA buffer"));
		vaddr = map->origbuffer;
		arm_unmap_nocache(map->allocbuffer, dmat->maxsize);
	}
	if (dmat->maxsize <= PAGE_SIZE)
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
	dmat->map_count--;
	_busdma_free_dmamap(map);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	int error = 0;
	pd_entry_t *pde;
	pt_entry_t pte;
	pt_entry_t *ptep;

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
	    "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Don't support checking for coherent mappings
		 * XXX in user address space.
		 */
		if (__predict_true(pmap == pmap_kernel())) {
			(void) pmap_get_pde_pte(pmap, vaddr, &pde, &ptep);
			if (__predict_false(pmap_pde_section(pde))) {
				curaddr = (*pde & L1_S_FRAME) |
				    (vaddr & L1_S_OFFSET);
				if (*pde & L1_S_CACHE_MASK) {
					map->flags &=
					    ~DMAMAP_COHERENT;
				}
			} else {
				pte = *ptep;
				KASSERT((pte & L2_TYPE_MASK) != L2_TYPE_INV,
				    ("INV type"));
				if (__predict_false((pte & L2_TYPE_MASK)
						    == L2_TYPE_L)) {
					curaddr = (pte & L2_L_FRAME) |
					    (vaddr & L2_L_OFFSET);
					if (pte & L2_L_CACHE_MASK) {
						map->flags &=
						    ~DMAMAP_COHERENT;
					}
				} else {
					curaddr = (pte & L2_S_FRAME) |
					    (vaddr & L2_S_OFFSET);
					if (pte & L2_S_CACHE_MASK) {
						map->flags &=
						    ~DMAMAP_COHERENT;
					}
				}
			}
		} else {
			curaddr = pmap_extract(pmap, vaddr);
			map->flags &= ~DMAMAP_COHERENT;
		}

		if (dmat->ranges) {
			struct arm32_dma_range *dr;

			dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
			    curaddr);
			if (dr == NULL)
				return (EINVAL);
			/*
			 * In a valid DMA range.  Translate the physical
			 * memory address to an address in the DMA window.
			 */
			curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
		}
		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (seg >= 0 && curaddr == lastaddr &&
		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
		    (dmat->boundary == 0 ||
		     (segs[seg].ds_addr & bmask) ==
		     (curaddr & bmask))) {
			segs[seg].ds_len += sgsize;
			goto segdone;
		} else {
			if (++seg >= dmat->nsegments)
				break;
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
		if (error)
			break;
segdone:
		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		error = EFBIG; /* XXX better return value here? */
	return (error);
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
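/*
 * Note that this implementation never defers a load: bus_dmamap_load_buffer()
 * is called directly and the callback is always invoked before we return,
 * with a NULL segment list and a zero count on error.
 */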
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                bus_size_t buflen, bus_dmamap_callback_t *callback,
                void *callback_arg, int flags)
{
	vm_offset_t	lastaddr = 0;
	int		error, nsegs = -1;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif

	KASSERT(dmat != NULL, ("dmatag is NULL"));
	KASSERT(map != NULL, ("dmamap is NULL"));
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_LINEAR|DMAMAP_COHERENT;
	map->buffer = buf;
	map->len = buflen;
	error = bus_dmamap_load_buffer(dmat,
	    dm_segments, map, buf, buflen, kernel_pmap,
	    flags, &lastaddr, &nsegs);
	if (error)
		(*callback)(callback_arg, NULL, 0, error);
	else
		(*callback)(callback_arg, dm_segments, nsegs + 1, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);

	return (0);
}

/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs = -1, error = 0;

	M_ASSERTPKTHDR(m0);

	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
	map->buffer = m0;
	map->len = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat,
				    dm_segments, map, m->m_data, m->m_len,
				    pmap_kernel(), flags, &lastaddr, &nsegs);
				map->len += m->m_len;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs + 1,
		    m0->m_pkthdr.len, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);

	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			int flags)
{
	int error = 0;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	*nsegs = -1;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
	map->buffer = m0;
	map->len = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat, segs, map,
				    m->m_data, m->m_len,
				    pmap_kernel(), flags, &lastaddr,
				    nsegs);
				map->len += m->m_len;
			}
		}
	} else {
		error = EINVAL;
	}

	/* XXX FIXME: Having to increment nsegs is really annoying */
	++*nsegs;
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, *nsegs);
	return (error);
}

/*
 * Like bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
	vm_offset_t lastaddr = 0;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, i, error;
	bus_size_t resid;
	struct iovec *iov;
	struct pmap *pmap;

	resid = uio->uio_resid;
	iov = uio->uio_iov;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_UIO|DMAMAP_COHERENT;
	map->buffer = uio;
	map->len = 0;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = kernel_pmap;

	error = 0;
	nsegs = -1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
		    resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = bus_dmamap_load_buffer(dmat, dm_segments, map,
			    addr, minlen, pmap, flags, &lastaddr, &nsegs);

			map->len += minlen;
			resid -= minlen;
		}
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs+1,
		    uio->uio_resid, error);
	}

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	map->flags &= ~DMAMAP_TYPE_MASK;
	return;
}

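/*
 * Perform the CPU cache maintenance required for one contiguous buffer:
 * a PREWRITE (host to device) requires a write-back of any dirty lines,
 * while a POSTREAD (device to host) requires an invalidate.  If the buffer
 * is not cache-line aligned at both ends, write-back-and-invalidate is used
 * instead so that unrelated data sharing the edge lines is not lost.
 */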
static __inline void
bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
{

	if (op & BUS_DMASYNC_PREWRITE)
		cpu_dcache_wb_range((vm_offset_t)buf, len);
	if (op & BUS_DMASYNC_POSTREAD) {
		if ((((vm_offset_t)buf | len) & arm_dcache_align_mask) == 0)
			cpu_dcache_inv_range((vm_offset_t)buf, len);
		else
			cpu_dcache_wbinv_range((vm_offset_t)buf, len);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct mbuf *m;
	struct uio *uio;
	int resid;
	struct iovec *iov;

	if (!(op & (BUS_DMASYNC_PREWRITE | BUS_DMASYNC_POSTREAD)))
		return;
	if (map->flags & DMAMAP_COHERENT)
		return;
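	/*
	 * For large maps it is cheaper to write back and invalidate the
	 * whole data cache than to walk every buffer a cache line at a time.
	 */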
	if ((op & BUS_DMASYNC_POSTREAD) && (map->len >= 2 * PAGE_SIZE)) {
		cpu_dcache_wbinv_all();
		return;
	}
	CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
	switch (map->flags & DMAMAP_TYPE_MASK) {
	case DMAMAP_LINEAR:
		bus_dmamap_sync_buf(map->buffer, map->len, op);
		break;
	case DMAMAP_MBUF:
		m = map->buffer;
		while (m) {
			if (m->m_len > 0)
				bus_dmamap_sync_buf(m->m_data, m->m_len, op);
			m = m->m_next;
		}
		break;
	case DMAMAP_UIO:
		uio = map->buffer;
		iov = uio->uio_iov;
		resid = uio->uio_resid;
		for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
			bus_size_t minlen = resid < iov[i].iov_len ? resid :
			    iov[i].iov_len;
			if (minlen > 0) {
				bus_dmamap_sync_buf(iov[i].iov_base, minlen,
				    op);
				resid -= minlen;
			}
		}
		break;
	default:
		break;
	}
	cpu_drain_writebuf();
}
866