busdma_machdep-v4.c revision 140310
/*-
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 2002 Peter Grehan
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *   From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep.c 140310 2005-01-15 19:07:23Z cognet $");

/*
 * ARM bus dma support routines
 */

#define _ARM32_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/ktr.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>

struct bus_dma_tag {
	bus_dma_tag_t		parent;
	bus_size_t		alignment;
	bus_size_t		boundary;
	bus_addr_t		lowaddr;
	bus_addr_t		highaddr;
	bus_dma_filter_t	*filter;
	void			*filterarg;
	bus_size_t		maxsize;
	u_int			nsegments;
	bus_size_t		maxsegsz;
	int			flags;
	int			ref_count;
	int			map_count;
	bus_dma_lock_t		*lockfunc;
	void			*lockfuncarg;
	/*
	 * DMA range for this tag.  If the page doesn't fall within
	 * one of these ranges, an error is returned.  The caller
	 * may then decide what to do with the transfer.  If the
	 * range pointer is NULL, it is ignored.
	 */
	struct arm32_dma_range	*ranges;
	int			_nranges;
};

#define DMAMAP_LINEAR		0x1
#define DMAMAP_MBUF		0x2
#define DMAMAP_UIO		0x4
#define DMAMAP_TYPE_MASK	(DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)
#define DMAMAP_COHERENT		0x8
struct bus_dmamap {
	bus_dma_tag_t	dmat;
	int		flags;
	void		*buffer;
	int		len;
};

static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp);

/*
 * Check to see if the specified page is in an allowed DMA range.
 */
static __inline struct arm32_dma_range *
_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	int i;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (curaddr >= dr->dr_sysbase &&
		    round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
			return (dr);
	}

	return (NULL);
}
/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should pass &Giant as dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
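
/*
 * Illustrative sketch: a driver that wants busdma to take its own mutex
 * around deferred operations passes busdma_lock_mutex as the lockfunc and
 * the mutex as the lockfuncarg when creating its tag.  The softc fields
 * ("sc_mtx", "sc_dmat") below are hypothetical names.
 */
#if 0
	mtx_init(&sc->sc_mtx, "mydev", NULL, MTX_DEF);
	error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0,
	    busdma_lock_mutex, &sc->sc_mtx, &sc->sc_dmat);
#endif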

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
	panic("driver error: busdma dflt_lock called");
#else
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}

/*
 * Allocate a device specific dma_tag.
 */
#define SEG_NB 1024

int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;
	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	newtag->ranges = bus_dma_get_range();
	newtag->_nranges = bus_dma_get_range_nb();
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	/*
	 * Take into account any restrictions imposed by our parent tag
	 */
	if (parent != NULL) {
		newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = max(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = min(parent->boundary,
					       newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	*dmat = newtag;
	return (error);
}
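
/*
 * Illustrative sketch: a tag for a device that can transfer up to 64KB
 * in at most 16 segments anywhere below 4GB, with no alignment or
 * boundary restrictions, and Giant as the deferral lock.  The softc
 * field "sc_dmat" is a hypothetical name.
 */
#if 0
	error = bus_dma_tag_create(NULL,	/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    65536, 16,				/* maxsize, nsegments */
	    65536,				/* maxsegsz */
	    0,					/* flags */
	    busdma_lock_mutex, &Giant,		/* lockfunc, lockfuncarg */
	    &sc->sc_dmat);
#endif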

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap;

	newmap = malloc(sizeof(*newmap), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (newmap == NULL)
		return (ENOMEM);
	*mapp = newmap;
	newmap->dmat = dmat;
	newmap->flags = 0;
	dmat->map_count++;

	return (0);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	free(map, M_DEVBUF);
	dmat->map_count--;
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
                 bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap = NULL;

	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	if (!*mapp) {
		newmap = malloc(sizeof(*newmap), M_DEVBUF, M_NOWAIT | M_ZERO);
		if (newmap == NULL)
			return (ENOMEM);
		dmat->map_count++;
		newmap->flags = 0;
		*mapp = newmap;
		newmap->dmat = dmat;
	}

	if (dmat->maxsize <= PAGE_SIZE) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL && newmap != NULL) {
		free(newmap, M_DEVBUF);
		dmat->map_count--;
		*mapp = NULL;
		return (ENOMEM);
	}
	return (0);
}
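
/*
 * Illustrative sketch: allocating a zeroed descriptor ring that satisfies
 * the tag's constraints.  Passing a NULL map pointer asks this routine to
 * allocate the dmamap as well.  The names "sc_dmat", "sc_ring" and
 * "sc_ring_map" are hypothetical.
 */
#if 0
	sc->sc_ring_map = NULL;
	error = bus_dmamem_alloc(sc->sc_dmat, &sc->sc_ring,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->sc_ring_map);
	if (error != 0)
		return (error);
	/* ... and on teardown ... */
	bus_dmamem_free(sc->sc_dmat, sc->sc_ring, sc->sc_ring_map);
#endif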

/*
 * Free a piece of memory and its allocated dmamap, which were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	if (dmat->maxsize <= PAGE_SIZE)
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
	dmat->map_count--;
	free(map, M_DEVBUF);
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                bus_size_t buflen, bus_dmamap_callback_t *callback,
                void *callback_arg, int flags)
{
	vm_offset_t	lastaddr = 0;
	int		error, nsegs = -1;
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif

	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_LINEAR|DMAMAP_COHERENT;
	map->buffer = buf;
	map->len = buflen;
	error = bus_dmamap_load_buffer(dmat,
	    dm_segments, map, buf, buflen, kernel_pmap,
	    flags, &lastaddr, &nsegs);
	if (error)
		(*callback)(callback_arg, NULL, 0, error);
	else
		(*callback)(callback_arg, dm_segments, nsegs + 1, error);

	return (0);
}
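
/*
 * Illustrative sketch: bus_dmamap_load() hands the segment list to a
 * caller-supplied callback; a driver typically records the bus address
 * of a single-segment buffer there.  All names below are hypothetical.
 */
#if 0
static void
mydev_ring_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *busaddrp = arg;

	if (error != 0)
		return;
	*busaddrp = segs[0].ds_addr;
}

	/* In the attach path: */
	error = bus_dmamap_load(sc->sc_dmat, sc->sc_ring_map, sc->sc_ring,
	    sc->sc_ring_size, mydev_ring_cb, &sc->sc_ring_busaddr,
	    BUS_DMA_NOWAIT);
#endif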

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
static int __inline
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	int error = 0;
	pd_entry_t *pde;
	pt_entry_t pte;
	pt_entry_t *ptep;

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Don't support checking for coherent mappings
		 * XXX in user address space.
		 */
		if (__predict_true(pmap == pmap_kernel())) {
			(void) pmap_get_pde_pte(pmap, vaddr, &pde, &ptep);
			if (__predict_false(pmap_pde_section(pde))) {
				curaddr = (*pde & L1_S_FRAME) |
				    (vaddr & L1_S_OFFSET);
				if (*pde & L1_S_CACHE_MASK) {
					map->flags &=
					    ~DMAMAP_COHERENT;
				}
			} else {
				pte = *ptep;
				KASSERT((pte & L2_TYPE_MASK) != L2_TYPE_INV,
				    ("INV type"));
				if (__predict_false((pte & L2_TYPE_MASK)
						    == L2_TYPE_L)) {
					curaddr = (pte & L2_L_FRAME) |
					    (vaddr & L2_L_OFFSET);
					if (pte & L2_L_CACHE_MASK) {
						map->flags &=
						    ~DMAMAP_COHERENT;
					}
				} else {
					curaddr = (pte & L2_S_FRAME) |
					    (vaddr & L2_S_OFFSET);
					if (pte & L2_S_CACHE_MASK) {
						map->flags &=
						    ~DMAMAP_COHERENT;
					}
				}
			}
		} else {
			curaddr = pmap_extract(pmap, vaddr);
			map->flags &= ~DMAMAP_COHERENT;
		}

		if (dmat->ranges) {
			struct arm32_dma_range *dr;

			dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
			    curaddr);
			if (dr == NULL)
				return (EINVAL);
			/*
			 * In a valid DMA range.  Translate the physical
			 * memory address to an address in the DMA window.
			 */
			curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;

		}
		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (seg >= 0 && curaddr == lastaddr &&
		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
		    (dmat->boundary == 0 ||
		     (segs[seg].ds_addr & bmask) ==
		     (curaddr & bmask))) {
			segs[seg].ds_len += sgsize;
			goto segdone;
		} else {
			if (++seg >= dmat->nsegments)
				break;
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
		if (error)
			break;
segdone:
		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		error = EFBIG; /* XXX better return value here? */
	return (error);
}

/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs = -1, error = 0;

	M_ASSERTPKTHDR(m0);

	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
	map->buffer = m0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0)
				error = bus_dmamap_load_buffer(dmat,
				    dm_segments, map, m->m_data, m->m_len,
				    pmap_kernel(), flags, &lastaddr, &nsegs);
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs + 1,
		    m0->m_pkthdr.len, error);
	}
	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			int flags)
{
	int error = 0;
	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	*nsegs = -1;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat, segs, map,
						m->m_data, m->m_len,
						pmap_kernel(), flags, &lastaddr,
						nsegs);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	/* XXX FIXME: Having to increment nsegs is really annoying */
	++*nsegs;
	CTR4(KTR_BUSDMA, "bus_dmamap_load_mbuf_sg: tag %p tag flags 0x%x "
	    "error %d nsegs %d", dmat, dmat->flags, error, *nsegs);
	return (error);
}
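
/*
 * Illustrative sketch: a network driver's transmit path loads an mbuf
 * chain with bus_dmamap_load_mbuf_sg() and then fills one hardware
 * descriptor per returned segment.  "MYDEV_MAXSEGS", "mydev_set_desc"
 * and the other names are hypothetical.
 */
#if 0
	bus_dma_segment_t segs[MYDEV_MAXSEGS];
	int i, nsegs, error;

	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, txb->txb_map, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	if (error == 0) {
		for (i = 0; i < nsegs; i++)
			mydev_set_desc(sc, segs[i].ds_addr, segs[i].ds_len);
	}
#endif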

/*
 * Like bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
	vm_offset_t lastaddr = 0;
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, i, error;
	bus_size_t resid;
	struct iovec *iov;
	struct pmap *pmap;

	resid = uio->uio_resid;
	iov = uio->uio_iov;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_UIO|DMAMAP_COHERENT;
	map->buffer = uio;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = kernel_pmap;

	error = 0;
	nsegs = -1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
		    resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = bus_dmamap_load_buffer(dmat, dm_segments, map,
			    addr, minlen, pmap, flags, &lastaddr, &nsegs);

			resid -= minlen;
		}
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs+1,
		    uio->uio_resid, error);
	}

	return (error);
}

/*
 * Release the mapping held by map.
 */
void
bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	map->flags &= ~DMAMAP_TYPE_MASK;
	return;
}

static void
bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
{

	if (op & BUS_DMASYNC_POSTREAD ||
	    op == (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) {
		cpu_dcache_wbinv_range((vm_offset_t)buf, len);
		return;
	}
	if (op & BUS_DMASYNC_PREWRITE)
		cpu_dcache_wb_range((vm_offset_t)buf, len);
	if (op & BUS_DMASYNC_PREREAD) {
		if ((((vm_offset_t)buf | len) & arm_dcache_align_mask) == 0)
			cpu_dcache_inv_range((vm_offset_t)buf, len);
		else
			cpu_dcache_wbinv_range((vm_offset_t)buf, len);
	}
}

void
bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct mbuf *m;
	struct uio *uio;
	int resid;
	struct iovec *iov;

	if (op == BUS_DMASYNC_POSTWRITE)
		return;
	if (map->flags & DMAMAP_COHERENT)
		return;
	switch(map->flags & DMAMAP_TYPE_MASK) {
	case DMAMAP_LINEAR:
		bus_dmamap_sync_buf(map->buffer, map->len, op);
		break;
	case DMAMAP_MBUF:
		m = map->buffer;
		while (m) {
			bus_dmamap_sync_buf(m->m_data, m->m_len, op);
			m = m->m_next;
		}
		break;
	case DMAMAP_UIO:
		uio = map->buffer;
		iov = uio->uio_iov;
		resid = uio->uio_resid;
		for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
			bus_size_t minlen = resid < iov[i].iov_len ? resid :
			    iov[i].iov_len;
			if (minlen > 0) {
				bus_dmamap_sync_buf(iov[i].iov_base, minlen,
				    op);
				resid -= minlen;
			}
		}
		break;
	default:
		break;
	}
	cpu_drain_writebuf();
}
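
/*
 * Illustrative sketch: a driver brackets each transfer with the matching
 * sync operations so the CPU caches and main memory agree about the
 * buffer contents.  The names below are hypothetical.
 */
#if 0
	/* Device will read from the buffer (memory -> device): */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_map, BUS_DMASYNC_PREWRITE);
	mydev_start_tx(sc);
	/* ... on completion ... */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_map, BUS_DMASYNC_POSTWRITE);

	/* Device will write into the buffer (device -> memory): */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_map, BUS_DMASYNC_PREREAD);
	mydev_start_rx(sc);
	/* ... on completion ... */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_map, BUS_DMASYNC_POSTREAD);
#endif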