/*
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 2002 Peter Grehan
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *   From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep.c 137760 2004-11-16 00:57:44Z cognet $");

/*
 * ARM bus dma support routines
 */

#define _ARM32_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>

struct bus_dma_tag {
	bus_dma_tag_t		parent;
	bus_size_t		alignment;
	bus_size_t		boundary;
	bus_addr_t		lowaddr;
	bus_addr_t		highaddr;
	bus_dma_filter_t	*filter;
	void			*filterarg;
	bus_size_t		maxsize;
	u_int			nsegments;
	bus_size_t		maxsegsz;
	int			flags;
	int			ref_count;
	int			map_count;
	bus_dma_lock_t		*lockfunc;
	void			*lockfuncarg;
	/*
	 * DMA range for this tag.  If the page doesn't fall within
	 * one of these ranges, an error is returned.  The caller
	 * may then decide what to do with the transfer.  If the
	 * range pointer is NULL, it is ignored.
	 */
	struct arm32_dma_range	*ranges;
	int			_nranges;
};

#define DMAMAP_LINEAR		0x1
#define DMAMAP_MBUF		0x2
#define DMAMAP_UIO		0x4
#define DMAMAP_TYPE_MASK	(DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)
#define DMAMAP_COHERENT		0x8
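
/*
 * A dmamap on this platform is a lightweight descriptor: it remembers the
 * tag it was created from, the type of buffer last loaded (linear, mbuf
 * chain, or uio), whether that buffer turned out to be mapped cacheable
 * (DMAMAP_COHERENT is cleared once a cacheable mapping is seen), and the
 * buffer pointer and length so that bus_dmamap_sync() can later walk the
 * same data for cache maintenance.  No bounce pages are kept here.
 */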
struct bus_dmamap {
        bus_dma_tag_t	dmat;
	int		flags;
	void 		*buffer;
	int		len;
};

static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp);

/*
 * Check to see if the specified page is in an allowed DMA range.
 */
static __inline struct arm32_dma_range *
_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	int i;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (curaddr >= dr->dr_sysbase &&
		    round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
			return (dr);
	}

	return (NULL);
}
/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should pass &Giant as dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
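
/*
 * Hypothetical usage sketch (not part of this file): a driver that relies
 * on busdma to serialize deferred callbacks would typically hand
 * busdma_lock_mutex and a mutex (&Giant here) to bus_dma_tag_create();
 * "sc->sc_dmat" below is an illustrative placeholder only.
 *
 *	error = bus_dma_tag_create(NULL, 1, 0,
 *	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
 *	    NULL, NULL, MAXBSIZE, 1, MAXBSIZE, 0,
 *	    busdma_lock_mutex, &Giant, &sc->sc_dmat);
 */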

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are never meant to be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
	panic("driver error: busdma dflt_lock called");
#else
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}

/*
 * Allocate a device specific dma_tag.
 */
#define SEG_NB 1024

int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;
	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	newtag->ranges = bus_dma_get_range();
	newtag->_nranges = bus_dma_get_range_nb();
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	/*
	 * Take into account any restrictions imposed by our parent tag
	 */
	if (parent != NULL) {
		newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = max(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = min(parent->boundary,
					       newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	*dmat = newtag;
	return (error);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap;

	newmap = malloc(sizeof(*newmap), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (newmap == NULL)
		return (ENOMEM);
	*mapp = newmap;
	newmap->dmat = dmat;
	newmap->flags = 0;
	dmat->map_count++;

	return (0);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	free(map, M_DEVBUF);
	dmat->map_count--;
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
                 bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap = NULL;
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	if (!*mapp) {
		newmap = malloc(sizeof(*newmap), M_DEVBUF, M_NOWAIT | M_ZERO);
		if (newmap == NULL)
			return (ENOMEM);
		dmat->map_count++;
		newmap->flags = 0;
		*mapp = newmap;
		newmap->dmat = dmat;
	}

	if (dmat->maxsize <= PAGE_SIZE) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL && newmap != NULL) {
		free(newmap, M_DEVBUF);
		dmat->map_count--;
		*mapp = NULL;
		return (ENOMEM);
	}
	return (0);
}
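
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * a zeroed DMA-able buffer of dmat->maxsize bytes and its map are obtained
 * together and later released together; "sc" is a placeholder softc.  Note
 * that *mapp must be NULL on entry for a new map to be allocated here.
 *
 *	sc->sc_map = NULL;
 *	error = bus_dmamem_alloc(sc->sc_dmat, &sc->sc_vaddr,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->sc_map);
 *	...
 *	bus_dmamem_free(sc->sc_dmat, sc->sc_vaddr, sc->sc_map);
 */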

/*
 * Free a piece of memory and its allocated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	if (dmat->maxsize <= PAGE_SIZE)
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
	dmat->map_count--;
	free(map, M_DEVBUF);
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                bus_size_t buflen, bus_dmamap_callback_t *callback,
                void *callback_arg, int flags)
{
	vm_offset_t	lastaddr = 0;
	int		error, nsegs = -1;
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif

	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_LINEAR|DMAMAP_COHERENT;
	map->buffer = buf;
	map->len = buflen;
	error = bus_dmamap_load_buffer(dmat,
	    dm_segments, map, buf, buflen, kernel_pmap,
	    flags, &lastaddr, &nsegs);
	if (error)
		(*callback)(callback_arg, NULL, 0, error);
	else
		(*callback)(callback_arg, dm_segments, nsegs + 1, error);

	return (0);
}
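
/*
 * Illustrative sketch of the callback contract (hypothetical driver code,
 * not part of this file): the callback sees the segment array only for the
 * duration of the call, so it usually just records the bus address it
 * needs.  "foo_load_cb" and the sc_* fields are placeholder names.
 *
 *	static void
 *	foo_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		bus_addr_t *addrp = arg;
 *
 *		if (error == 0 && nseg > 0)
 *			*addrp = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_load(sc->sc_dmat, sc->sc_map, sc->sc_vaddr,
 *	    sc->sc_size, foo_load_cb, &sc->sc_busaddr, BUS_DMA_NOWAIT);
 */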

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
static int __inline
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	int error = 0;
	pd_entry_t *pde;
	pt_entry_t pte;
	pt_entry_t *ptep;

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Don't support checking for coherent mappings
		 * XXX in user address space.
		 */
		if (__predict_true(pmap == pmap_kernel())) {
			(void) pmap_get_pde_pte(pmap, vaddr, &pde, &ptep);
			if (__predict_false(pmap_pde_section(pde))) {
				curaddr = (*pde & L1_S_FRAME) |
				    (vaddr & L1_S_OFFSET);
				if (*pde & L1_S_CACHE_MASK) {
					map->flags &=
					    ~DMAMAP_COHERENT;
				}
			} else {
				pte = *ptep;
				KASSERT((pte & L2_TYPE_MASK) != L2_TYPE_INV,
				    ("INV type"));
				if (__predict_false((pte & L2_TYPE_MASK)
						    == L2_TYPE_L)) {
					curaddr = (pte & L2_L_FRAME) |
					    (vaddr & L2_L_OFFSET);
					if (pte & L2_L_CACHE_MASK) {
						map->flags &=
						    ~DMAMAP_COHERENT;
					}
				} else {
					curaddr = (pte & L2_S_FRAME) |
					    (vaddr & L2_S_OFFSET);
					if (pte & L2_S_CACHE_MASK) {
						map->flags &=
						    ~DMAMAP_COHERENT;
					}
				}
			}
		} else {
			curaddr = pmap_extract(pmap, vaddr);
			map->flags &= ~DMAMAP_COHERENT;
		}

		if (dmat->ranges) {
			struct arm32_dma_range *dr;

			dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
			    curaddr);
			if (dr == NULL)
				return (EINVAL);
			/*
			 * In a valid DMA range.  Translate the physical
			 * memory address to an address in the DMA window.
			 */
			curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
		}
		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (seg >= 0 && curaddr == lastaddr &&
		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
		    (dmat->boundary == 0 ||
		     (segs[seg].ds_addr & bmask) ==
		     (curaddr & bmask))) {
			segs[seg].ds_len += sgsize;
			goto segdone;
		} else {
			if (++seg >= dmat->nsegments)
				break;
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
		if (error)
			break;
segdone:
		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		error = EFBIG; /* XXX better return value here? */
	return (error);
}

/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs = -1, error = 0;

	M_ASSERTPKTHDR(m0);

	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
	map->buffer = m0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0)
				error = bus_dmamap_load_buffer(dmat,
				    dm_segments, map, m->m_data, m->m_len,
				    pmap_kernel(), flags, &lastaddr, &nsegs);
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs + 1,
		    m0->m_pkthdr.len, error);
	}
	return (error);
}

/*
 * Like bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
	vm_offset_t lastaddr = 0;
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, i, error;
	bus_size_t resid;
	struct iovec *iov;
	struct pmap *pmap;

	resid = uio->uio_resid;
	iov = uio->uio_iov;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_UIO|DMAMAP_COHERENT;
	map->buffer = uio;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = kernel_pmap;

	error = 0;
	nsegs = -1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
		    resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = bus_dmamap_load_buffer(dmat, dm_segments, map,
			    addr, minlen, pmap, flags, &lastaddr, &nsegs);

			resid -= minlen;
		}
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs+1,
		    uio->uio_resid, error);
	}

	return (error);
}

/*
 * Release the mapping held by map.
 */
void
bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	map->flags &= ~DMAMAP_TYPE_MASK;
	return;
}

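/*
 * Perform the CPU cache maintenance a sync operation implies for one
 * contiguous buffer: write-back and invalidate for POSTREAD (or a combined
 * PREREAD|PREWRITE), write-back only for PREWRITE, and invalidate for
 * PREREAD, falling back to write-back-invalidate when the buffer is not
 * cache-line aligned.
 */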
static void
bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
{

	if (op & BUS_DMASYNC_POSTREAD ||
	    op == (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) {
		cpu_dcache_wbinv_range((vm_offset_t)buf, len);
		return;
	}
	if (op & BUS_DMASYNC_PREWRITE)
		cpu_dcache_wb_range((vm_offset_t)buf, len);
	if (op & BUS_DMASYNC_PREREAD) {
		if ((((vm_offset_t)buf | len) & arm_dcache_align_mask) == 0)
			cpu_dcache_inv_range((vm_offset_t)buf, len);
		else
			cpu_dcache_wbinv_range((vm_offset_t)buf, len);
	}
}

void
bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct mbuf *m;
	struct uio *uio;
	int resid;
	struct iovec *iov;

	if (op == BUS_DMASYNC_POSTWRITE)
		return;
	if (map->flags & DMAMAP_COHERENT)
		return;
	switch(map->flags & DMAMAP_TYPE_MASK) {
	case DMAMAP_LINEAR:
		bus_dmamap_sync_buf(map->buffer, map->len, op);
		break;
	case DMAMAP_MBUF:
		m = map->buffer;
		while (m) {
			bus_dmamap_sync_buf(m->m_data, m->m_len, op);
			m = m->m_next;
		}
		break;
	case DMAMAP_UIO:
		uio = map->buffer;
		iov = uio->uio_iov;
		resid = uio->uio_resid;
		for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
			bus_size_t minlen = resid < iov[i].iov_len ? resid :
			    iov[i].iov_len;
			if (minlen > 0) {
				bus_dmamap_sync_buf(iov[i].iov_base, minlen,
				    op);
				resid -= minlen;
			}
		}
		break;
	default:
		break;
	}
	cpu_drain_writebuf();
}