/*-
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 2002 Peter Grehan
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *   From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep.c 146597 2005-05-24 22:10:35Z cognet $");

/*
 * ARM bus dma support routines
 */

#define _ARM32_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/ktr.h>
#include <sys/kernel.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>

struct bus_dma_tag {
	bus_dma_tag_t		parent;
	bus_size_t		alignment;
	bus_size_t		boundary;
	bus_addr_t		lowaddr;
	bus_addr_t		highaddr;
	bus_dma_filter_t	*filter;
	void			*filterarg;
	bus_size_t		maxsize;
	u_int			nsegments;
	bus_size_t		maxsegsz;
	int			flags;
	int			ref_count;
	int			map_count;
	bus_dma_lock_t		*lockfunc;
	void			*lockfuncarg;
	/*
	 * DMA range for this tag.  If the page doesn't fall within
	 * one of these ranges, an error is returned.  The caller
	 * may then decide what to do with the transfer.  If the
	 * range pointer is NULL, it is ignored.
	 */
	struct arm32_dma_range	*ranges;
	int			_nranges;
};

#define DMAMAP_LINEAR		0x1
#define DMAMAP_MBUF		0x2
#define DMAMAP_UIO		0x4
#define DMAMAP_COHERENT		0x8
#define DMAMAP_ALLOCATED	0x10
#define DMAMAP_STATIC_BUSY	0x20
#define DMAMAP_TYPE_MASK	(DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)

struct bus_dmamap {
	bus_dma_tag_t	dmat;
	int		flags;
	void		*buffer;
	int		len;
};

#define BUSDMA_STATIC_MAPS	500
static struct bus_dmamap map_pool[BUSDMA_STATIC_MAPS];

static struct mtx busdma_mtx;

MTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF);

static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp);

/*
 * Check to see if the specified page is in an allowed DMA range.
 */
static __inline struct arm32_dma_range *
_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	int i;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (curaddr >= dr->dr_sysbase &&
		    round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
			return (dr);
	}

	return (NULL);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
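
/*
 * Example (illustrative sketch, not part of this file): a driver whose
 * loads may be deferred would pass busdma_lock_mutex together with its
 * own mutex (the softc field name "sc_mtx" here is hypothetical) when
 * creating its tag, so busdma can take the driver lock around a deferred
 * callback; a driver with no lock of its own would pass &Giant instead:
 *
 *	error = bus_dma_tag_create(..., busdma_lock_mutex, &sc->sc_mtx,
 *	    &sc->sc_dmat);
 */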

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
	panic("driver error: busdma dflt_lock called");
#else
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}

static bus_dmamap_t
_busdma_alloc_dmamap(void)
{
	int i;
	bus_dmamap_t map;

	mtx_lock(&busdma_mtx);
	for (i = 0; i < BUSDMA_STATIC_MAPS; i++)
		if (!(map_pool[i].flags & DMAMAP_STATIC_BUSY)) {
			bzero(&map_pool[i], sizeof(map_pool[i]));
			map_pool[i].flags |= DMAMAP_STATIC_BUSY;
			mtx_unlock(&busdma_mtx);
			return (&map_pool[i]);
		}
	mtx_unlock(&busdma_mtx);
	map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (map)
		map->flags |= DMAMAP_ALLOCATED;
	return (map);
}

static void
_busdma_free_dmamap(bus_dmamap_t map)
{
	if (map->flags & DMAMAP_ALLOCATED)
		free(map, M_DEVBUF);
	else {
		mtx_lock(&busdma_mtx);
		map->flags &= ~DMAMAP_STATIC_BUSY;
		mtx_unlock(&busdma_mtx);
	}
}

/*
 * Allocate a device specific dma_tag.
 */
#define SEG_NB 1024

int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	newtag->ranges = bus_dma_get_range();
	newtag->_nranges = bus_dma_get_range_nb();
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	/*
	 * Take into account any restrictions imposed by our parent tag.
	 */
	if (parent != NULL) {
		newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = max(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = min(parent->boundary,
			    newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information.
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	*dmat = newtag;
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);

	return (error);
}
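
/*
 * Example (hypothetical driver sketch): creating a tag for a device that
 * can DMA anywhere in 32-bit space, in buffers of up to 64KB split across
 * at most 16 segments.  Passing NULL for lockfunc installs dflt_lock and
 * is only safe because this driver never defers a load.  The softc names
 * are illustrative, not from this file:
 */
#if 0
	error = bus_dma_tag_create(NULL,	/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    65536, 16,				/* maxsize, nsegments */
	    65536,				/* maxsegsz */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->sc_dmat);
#endif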

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
#ifdef KTR
	bus_dma_tag_t dmat_copy = dmat;
#endif

	if (dmat != NULL) {
		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy);

	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap;
#ifdef KTR
	int error = 0;
#endif

	newmap = _busdma_alloc_dmamap();
	if (newmap == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
		return (ENOMEM);
	}
	*mapp = newmap;
	newmap->dmat = dmat;
	dmat->map_count++;

	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);

	return (0);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	_busdma_free_dmamap(map);
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}
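
/*
 * Example (hypothetical sketch): maps are typically created up front at
 * attach time, and every map must be destroyed before the tag itself,
 * since bus_dma_tag_destroy() returns EBUSY while map_count != 0:
 *
 *	if (bus_dmamap_create(sc->sc_dmat, 0, &sc->sc_dmamap) != 0)
 *		return (ENOMEM);
 *	...
 *	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
 *	bus_dma_tag_destroy(sc->sc_dmat);
 */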

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap = NULL;
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	newmap = _busdma_alloc_dmamap();
	if (newmap == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	}
	dmat->map_count++;
	*mapp = newmap;
	newmap->dmat = dmat;

	if (dmat->maxsize <= PAGE_SIZE) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL) {
		_busdma_free_dmamap(newmap);
		dmat->map_count--;
		*mapp = NULL;
		return (ENOMEM);
	}
	return (0);
}

/*
 * Free a piece of memory and its allocated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	if (dmat->maxsize <= PAGE_SIZE)
		free(vaddr, M_DEVBUF);
	else
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	dmat->map_count--;
	_busdma_free_dmamap(map);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}
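
/*
 * Example (hypothetical sketch): a driver allocating a DMA-able descriptor
 * ring with bus_dmamem_alloc() gets both the memory and a map back, and
 * must pass that same map to bus_dmamem_free():
 */
#if 0
	void *ring;
	bus_dmamap_t ringmap;

	if (bus_dmamem_alloc(sc->sc_dmat, &ring,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ringmap) != 0)
		return (ENOMEM);
	/* ... load the ring and run the device ... */
	bus_dmamem_free(sc->sc_dmat, ring, ringmap);
#endif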

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entry, and the ending segment on exit.
 */
static int __inline
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	int error = 0;
	pd_entry_t *pde;
	pt_entry_t pte;
	pt_entry_t *ptep;

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
	    "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Don't support checking for coherent mappings
		 * XXX in user address space.
		 */
		if (0 && __predict_true(pmap == pmap_kernel())) {
			(void) pmap_get_pde_pte(pmap, vaddr, &pde, &ptep);
			if (__predict_false(pmap_pde_section(pde))) {
				curaddr = (*pde & L1_S_FRAME) |
				    (vaddr & L1_S_OFFSET);
				if (*pde & L1_S_CACHE_MASK) {
					map->flags &=
					    ~DMAMAP_COHERENT;
				}
			} else {
				pte = *ptep;
				KASSERT((pte & L2_TYPE_MASK) != L2_TYPE_INV,
				    ("INV type"));
				if (__predict_false((pte & L2_TYPE_MASK)
						    == L2_TYPE_L)) {
					curaddr = (pte & L2_L_FRAME) |
					    (vaddr & L2_L_OFFSET);
					if (pte & L2_L_CACHE_MASK) {
						map->flags &=
						    ~DMAMAP_COHERENT;
					}
				} else {
					curaddr = (pte & L2_S_FRAME) |
					    (vaddr & L2_S_OFFSET);
					if (pte & L2_S_CACHE_MASK) {
						map->flags &=
						    ~DMAMAP_COHERENT;
					}
				}
			}
		} else {
			curaddr = pmap_extract(pmap, vaddr);
			map->flags &= ~DMAMAP_COHERENT;
		}

		if (dmat->ranges) {
			struct arm32_dma_range *dr;

			dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
			    curaddr);
			if (dr == NULL)
				return (EINVAL);
			/*
			 * In a valid DMA range.  Translate the physical
			 * memory address to an address in the DMA window.
			 */
			curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (seg >= 0 && curaddr == lastaddr &&
		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
		    (dmat->boundary == 0 ||
		     (segs[seg].ds_addr & bmask) ==
		     (curaddr & bmask))) {
			segs[seg].ds_len += sgsize;
			goto segdone;
		} else {
			if (++seg >= dmat->nsegments)
				break;
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
		if (error)
			break;
segdone:
		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		error = EFBIG; /* XXX better return value here? */
	return (error);
}
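
/*
 * Worked example of the segment arithmetic above (values illustrative):
 * with boundary = 0x10000, bmask = ~(0x10000 - 1) = 0xffff0000.  For
 * curaddr = 0x2000ff00, sgsize starts at PAGE_SIZE - (curaddr & PAGE_MASK)
 * = 0x100.  Then baddr = (curaddr + 0x10000) & bmask = 0x20010000, so
 * baddr - curaddr = 0x100 and sgsize stays 0x100.  The next chunk starts
 * exactly on the 64KB boundary and opens a new segment, because
 * (segs[seg].ds_addr & bmask) == 0x20000000 no longer matches
 * (curaddr & bmask) == 0x20010000.
 */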

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
	vm_offset_t lastaddr = 0;
	int error, nsegs = -1;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif

	KASSERT(dmat != NULL, ("dmatag is NULL"));
	KASSERT(map != NULL, ("dmamap is NULL"));
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_LINEAR|DMAMAP_COHERENT;
	map->buffer = buf;
	map->len = buflen;
	error = bus_dmamap_load_buffer(dmat,
	    dm_segments, map, buf, buflen, kernel_pmap,
	    flags, &lastaddr, &nsegs);
	if (error)
		(*callback)(callback_arg, NULL, 0, error);
	else
		(*callback)(callback_arg, dm_segments, nsegs + 1, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);

	return (0);
}
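
/*
 * Example (hypothetical driver sketch): a typical load.  In this
 * implementation the callback runs synchronously, before bus_dmamap_load()
 * returns, and errors are reported through the callback's error argument,
 * so the driver can stash the segments from inside it.  The "foo" names
 * are illustrative:
 */
#if 0
static void
foo_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct foo_softc *sc = arg;

	if (error != 0)
		return;
	sc->sc_busaddr = segs[0].ds_addr;	/* single-segment case */
}

static void
foo_start_dma(struct foo_softc *sc, void *buf, bus_size_t buflen)
{

	bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, buf, buflen,
	    foo_dma_callback, sc, BUS_DMA_NOWAIT);
}
#endif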

/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs = -1, error = 0;

	M_ASSERTPKTHDR(m0);

	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
	map->buffer = m0;
	map->len = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat,
				    dm_segments, map, m->m_data, m->m_len,
				    pmap_kernel(), flags, &lastaddr, &nsegs);
				map->len += m->m_len;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/*
		 * Force "no valid mappings" in the callback on error.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs + 1,
		    m0->m_pkthdr.len, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);

	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
    int flags)
{
	int error = 0;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	*nsegs = -1;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
	map->buffer = m0;
	map->len = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat, segs, map,
				    m->m_data, m->m_len,
				    pmap_kernel(), flags, &lastaddr,
				    nsegs);
				map->len += m->m_len;
			}
		}
	} else {
		error = EINVAL;
	}

	/* XXX FIXME: Having to increment nsegs is really annoying */
	++*nsegs;
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, *nsegs);
	return (error);
}
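
/*
 * Example (hypothetical NIC-style sketch): bus_dmamap_load_mbuf_sg() fills
 * the caller's segment array directly instead of invoking a callback,
 * which is the common pattern in transmit paths.  FOO_MAXSEGS is an
 * assumed driver constant:
 */
#if 0
	bus_dma_segment_t segs[FOO_MAXSEGS];
	int error, nsegs;

	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, map, m0, segs, &nsegs,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		return (error);
	/* ... hand segs[0..nsegs-1] to the hardware ... */
#endif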

/*
 * Like bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
	vm_offset_t lastaddr = 0;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, i, error;
	bus_size_t resid;
	struct iovec *iov;
	struct pmap *pmap;

	resid = uio->uio_resid;
	iov = uio->uio_iov;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_UIO|DMAMAP_COHERENT;
	map->buffer = uio;
	map->len = 0;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = kernel_pmap;

	error = 0;
	nsegs = -1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
		    resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = bus_dmamap_load_buffer(dmat, dm_segments, map,
			    addr, minlen, pmap, flags, &lastaddr, &nsegs);
			map->len += minlen;
			resid -= minlen;
		}
	}

	if (error) {
		/*
		 * Force "no valid mappings" in the callback on error.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs + 1,
		    uio->uio_resid, error);
	}

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	map->flags &= ~DMAMAP_TYPE_MASK;
}
765
766static void
767bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
768{
769
770	if (op & BUS_DMASYNC_PREWRITE)
771		cpu_dcache_wb_range((vm_offset_t)buf, len);
772	if (op & BUS_DMASYNC_POSTREAD) {
773		if ((((vm_offset_t)buf | len) & arm_dcache_align_mask) == 0)
774			cpu_dcache_inv_range((vm_offset_t)buf, len);
775		else
776			cpu_dcache_wbinv_range((vm_offset_t)buf, len);
777
778	}
779}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct mbuf *m;
	struct uio *uio;
	int resid;
	struct iovec *iov;

	if (!(op & (BUS_DMASYNC_PREWRITE | BUS_DMASYNC_POSTREAD)))
		return;
	if (map->flags & DMAMAP_COHERENT)
		return;
	if (map->len > PAGE_SIZE) {
		cpu_dcache_wbinv_all();
		return;
	}
	CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
	switch (map->flags & DMAMAP_TYPE_MASK) {
	case DMAMAP_LINEAR:
		bus_dmamap_sync_buf(map->buffer, map->len, op);
		break;
	case DMAMAP_MBUF:
		m = map->buffer;
		while (m) {
			bus_dmamap_sync_buf(m->m_data, m->m_len, op);
			m = m->m_next;
		}
		break;
	case DMAMAP_UIO:
		uio = map->buffer;
		iov = uio->uio_iov;
		resid = uio->uio_resid;
		for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
			bus_size_t minlen = resid < iov[i].iov_len ? resid :
			    iov[i].iov_len;
			if (minlen > 0) {
				bus_dmamap_sync_buf(iov[i].iov_base, minlen,
				    op);
				resid -= minlen;
			}
		}
		break;
	default:
		break;
	}
	cpu_drain_writebuf();
}
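
/*
 * Example (hypothetical sketch): the usual sync discipline around one DMA
 * transaction, via the bus_dmamap_sync()/bus_dmamap_unload() wrappers in
 * <machine/bus.h>.  PRE ops go before the device touches the buffer, POST
 * ops after it is done, then the map is unloaded:
 */
#if 0
	bus_dmamap_sync(sc->sc_dmat, map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/* ... start the transfer and wait for completion ... */
	bus_dmamap_sync(sc->sc_dmat, map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, map);
#endif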