/*-
 * Copyright (c) 2002 Peter Grehan
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *   From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/powerpc/busdma_machdep.c 139825 2005-01-07 02:29:27Z imp $");

/*
 * MacPPC bus dma support routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>

struct bus_dma_tag {
	bus_dma_tag_t     parent;
	bus_size_t        alignment;
	bus_size_t        boundary;
	bus_addr_t        lowaddr;
	bus_addr_t        highaddr;
	bus_dma_filter_t *filter;
	void             *filterarg;
	bus_size_t        maxsize;
	u_int             nsegments;
	bus_size_t        maxsegsz;
	int               flags;
	int               ref_count;
	int               map_count;
	bus_dma_lock_t   *lockfunc;
	void             *lockfuncarg;
};

struct bus_dmamap {
	bus_dma_tag_t          dmat;
	void                  *buf;		/* unmapped buffer pointer */
	bus_size_t             buflen;		/* unmapped buffer length */
	bus_dmamap_callback_t *callback;
	void                  *callback_arg;
};

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
	panic("driver error: busdma dflt_lock called");
#else
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}

/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}

	/*
	 * Take into account any restrictions imposed by our parent tag.
	 */
	if (parent != NULL) {
		newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = max(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
			    newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information.
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	*dmat = newtag;
	return (error);
}

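/*
 * Illustrative sketch only (not used by this file): how a hypothetical
 * driver "foo" might create a tag against the interface above, using
 * busdma_lock_mutex with &Giant as the deferral lock.  All foo_* names
 * and the size limits are assumptions made for the example.
 */
#if 0
struct foo_softc {
	bus_dma_tag_t	foo_dmat;
};

static int
foo_dma_tag_create(struct foo_softc *sc)
{

	/* Single-segment, page-sized transfers anywhere in 32-bit space. */
	return (bus_dma_tag_create(NULL,	/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    PAGE_SIZE,				/* maxsize */
	    1,					/* nsegments */
	    PAGE_SIZE,				/* maxsegsz */
	    0,					/* flags */
	    busdma_lock_mutex, &Giant,		/* lockfunc, lockfuncarg */
	    &sc->foo_dmat));
}
#endif
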
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	*mapp = NULL;
	dmat->map_count++;

	return (0);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	if (map != NULL)
		panic("bus_dmamap_destroy: invalid map");
	dmat->map_count--;
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
                 bus_dmamap_t *mapp)
{
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	*mapp = NULL;

	if (dmat->maxsize <= PAGE_SIZE) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}

	if (*vaddr == NULL)
		return (ENOMEM);

	return (0);
}

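/*
 * Illustrative sketch only (not used by this file): allocating and
 * releasing a zeroed DMA-safe buffer against a previously created tag.
 * The foo_* names are assumptions made for the example.
 */
#if 0
static void *
foo_dma_ring_alloc(bus_dma_tag_t dmat, bus_dmamap_t *mapp)
{
	void *ring;

	if (bus_dmamem_alloc(dmat, &ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    mapp) != 0)
		return (NULL);
	return (ring);
}

static void
foo_dma_ring_free(bus_dma_tag_t dmat, void *ring, bus_dmamap_t map)
{

	bus_dmamem_free(dmat, ring, map);
}
#endif
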
/*
 * Free a piece of memory and its allocated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	if (map != NULL)
		panic("bus_dmamem_free: Invalid map freed\n");
	if (dmat->maxsize <= PAGE_SIZE)
		free(vaddr, M_DEVBUF);
	else
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                bus_size_t buflen, bus_dmamap_callback_t *callback,
                void *callback_arg, int flags)
{
	vm_offset_t		vaddr;
	vm_offset_t		paddr;
#ifdef __GNUC__
	bus_dma_segment_t	dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t	dm_segments[BUS_DMAMAP_NSEGS];
#endif
	bus_dma_segment_t      *sg;
	int			seg;
	int			error = 0;
	vm_offset_t		nextpaddr;

	if (map != NULL)
		panic("bus_dmamap_load: Invalid map\n");

	vaddr = (vm_offset_t)buf;
	sg = &dm_segments[0];
	seg = 1;
	sg->ds_len = 0;
	nextpaddr = 0;

	do {
		bus_size_t	size;

		paddr = pmap_kextract(vaddr);
		size = PAGE_SIZE - (paddr & PAGE_MASK);
		if (size > buflen)
			size = buflen;

		if (sg->ds_len == 0) {
			sg->ds_addr = paddr;
			sg->ds_len = size;
		} else if (paddr == nextpaddr) {
			sg->ds_len += size;
		} else {
			/* Go to the next segment */
			sg++;
			seg++;
			if (seg > dmat->nsegments)
				break;
			sg->ds_addr = paddr;
			sg->ds_len = size;
		}
		vaddr += size;
		nextpaddr = paddr + size;
		buflen -= size;

	} while (buflen > 0);

	if (buflen != 0) {
		printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
		    (u_long)buflen);
		error = EFBIG;
	}

	(*callback)(callback_arg, dm_segments, seg, error);

	return (0);
}

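/*
 * Illustrative sketch only (not used by this file): loading a buffer and
 * picking up the resulting bus address in the callback.  Because this
 * implementation never defers the load, the callback has run by the time
 * bus_dmamap_load() returns.  The foo_* names are assumptions made for
 * the example.
 */
#if 0
static void
foo_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *busaddrp = arg;

	if (error == 0 && nseg == 1)
		*busaddrp = segs[0].ds_addr;
}

static int
foo_map_ring(bus_dma_tag_t dmat, bus_dmamap_t map, void *ring,
    bus_size_t len, bus_addr_t *busaddrp)
{

	return (bus_dmamap_load(dmat, map, ring, len, foo_load_cb,
	    busaddrp, BUS_DMA_NOWAIT));
}
#endif
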
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
    void *buf, bus_size_t buflen, struct thread *td,
    int flags, vm_offset_t *lastaddrp, int *segp,
    int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	pmap_t pmap;

	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = NULL;

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs = 0, error = 0;

	M_ASSERTPKTHDR(m0);

	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat,
				    dm_segments, m->m_data, m->m_len, NULL,
				    flags, &lastaddr, &nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/*
		 * Force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs + 1,
		    m0->m_pkthdr.len, error);
	}
	return (error);
}

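/*
 * Illustrative sketch only (not used by this file): mapping an outgoing
 * packet chain with the two-stage callback form used by the mbuf and uio
 * loaders.  The foo_* names are assumptions made for the example.
 */
#if 0
static void
foo_tx_cb(void *arg, bus_dma_segment_t *segs, int nseg, bus_size_t mapsize,
    int error)
{

	if (error != 0)
		return;
	/* Program up to nseg descriptors from segs[] here. */
}

static int
foo_tx_load(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0)
{

	return (bus_dmamap_load_mbuf(dmat, map, m0, foo_tx_cb, NULL,
	    BUS_DMA_NOWAIT));
}
#endif
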
/*
 * Like bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
	vm_offset_t lastaddr = 0;
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, i, error, first;
	bus_size_t resid;
	struct iovec *iov;
	struct thread *td = NULL;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		td = uio->uio_td;
		KASSERT(td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
	}

	first = 1;
	nsegs = error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
		    resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = bus_dmamap_load_buffer(dmat, dm_segments, addr,
			    minlen, td, flags, &lastaddr, &nsegs, first);

			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/*
		 * Force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs + 1,
		    uio->uio_resid, error);
	}

	return (error);
}

/*
 * Release the mapping held by map.  A no-op on PowerPC.
 */
void
bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	return;
}

/*
 * Perform any DMA synchronisation required for the map.
 * A no-op in this implementation.
 */
void
bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{

	return;
}
581