/*
 * Copyright (c) 2002 Peter Grehan
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *   From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/powerpc/busdma_machdep.c 118081 2003-07-27 13:52:10Z mux $");

/*
 * MacPPC bus dma support routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>

struct bus_dma_tag {
	bus_dma_tag_t     parent;
	bus_size_t        alignment;
	bus_size_t        boundary;
	bus_addr_t        lowaddr;
	bus_addr_t        highaddr;
	bus_dma_filter_t *filter;
	void             *filterarg;
	bus_size_t        maxsize;
	u_int             nsegments;
	bus_size_t        maxsegsz;
	int               flags;
	int               ref_count;
	int               map_count;
	bus_dma_lock_t	 *lockfunc;
	void		 *lockfuncarg;
};

struct bus_dmamap {
        bus_dma_tag_t          dmat;
        void                  *buf;             /* unmapped buffer pointer */
        bus_size_t             buflen;          /* unmapped buffer length */
        bus_dmamap_callback_t *callback;
        void                  *callback_arg;
};

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
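
/*
 * Usage sketch (illustrative only, not part of this file): a driver that
 * wants its deferred load callbacks serialized by Giant can pass
 * busdma_lock_mutex and &Giant as the lockfunc/lockfuncarg pair when it
 * creates its tag.  The softc and tag names below are hypothetical; the
 * argument order matches bus_dma_tag_create() as defined later in this
 * file.
 *
 *	error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR,
 *	    BUS_SPACE_MAXADDR, NULL, NULL, MAXBSIZE, 1, MAXBSIZE, 0,
 *	    busdma_lock_mutex, &Giant, &sc->buf_dmat);
 */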

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are never meant to be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
	panic("driver error: busdma dflt_lock called");
#else
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}

/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
        newtag->maxsize = maxsize;
        newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}

        /*
	 * Take into account any restrictions imposed by our parent tag
	 */
        if (parent != NULL) {
                newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
                newtag->highaddr = max(parent->highaddr, newtag->highaddr);

                /*
                 * XXX Not really correct??? Probably need to honor boundary
                 *     all the way up the inheritance chain.
                 */
                newtag->boundary = max(parent->boundary, newtag->boundary);
                if (newtag->filter == NULL) {
                        /*
                         * Short circuit looking at our parent directly
                         * since we have encapsulated all of its information
                         */
                        newtag->filter = parent->filter;
                        newtag->filterarg = parent->filterarg;
                        newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	*dmat = newtag;
	return (error);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {

                if (dmat->map_count != 0)
                        return (EBUSY);

                while (dmat != NULL) {
                        bus_dma_tag_t parent;

                        parent = dmat->parent;
                        atomic_subtract_int(&dmat->ref_count, 1);
                        if (dmat->ref_count == 0) {
                                free(dmat, M_DEVBUF);
                                /*
                                 * Last reference count, so
                                 * release our reference
                                 * count on our parent.
                                 */
                                dmat = parent;
                        } else
                                dmat = NULL;
                }
        }
        return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	*mapp = NULL;
	dmat->map_count++;

	return (0);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        if (map != NULL) {
		panic("dmamap_destroy: NULL?\n");
        }
        dmat->map_count--;
        return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
                 bus_dmamap_t *mapp)
{
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

        *mapp = NULL;

        if (dmat->maxsize <= PAGE_SIZE) {
                *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
        } else {
                /*
                 * XXX Use Contigmalloc until it is merged into this facility
                 *     and handles multi-seg allocations.  Nobody is doing
                 *     multi-seg allocations yet though.
                 */
                *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
                    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
                    dmat->boundary);
        }

        if (*vaddr == NULL)
                return (ENOMEM);

        return (0);
}
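
/*
 * Usage sketch (illustrative only): allocate a zeroed buffer that honors
 * the tag's constraints, then release it with bus_dmamem_free() below.
 * "sc" and its members are hypothetical driver state.
 *
 *	error = bus_dmamem_alloc(sc->buf_dmat, &sc->buf,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->buf_map);
 *	if (error != 0)
 *		return (error);
 *	...
 *	bus_dmamem_free(sc->buf_dmat, sc->buf, sc->buf_map);
 */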

/*
 * Free a piece of memory and its associated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
        if (map != NULL)
                panic("bus_dmamem_free: Invalid map freed\n");
        if (dmat->maxsize <= PAGE_SIZE)
		free(vaddr, M_DEVBUF);
        else {
		mtx_lock(&Giant);
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
		mtx_unlock(&Giant);
	}
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                bus_size_t buflen, bus_dmamap_callback_t *callback,
                void *callback_arg, int flags)
{
        vm_offset_t             vaddr;
        vm_offset_t             paddr;
#ifdef __GNUC__
        bus_dma_segment_t       dm_segments[dmat->nsegments];
#else
        bus_dma_segment_t       dm_segments[BUS_DMAMAP_NSEGS];
#endif
        bus_dma_segment_t      *sg;
        int                     seg;
        int                     error = 0;
        vm_offset_t             nextpaddr;

        if (map != NULL)
		panic("bus_dmamap_load: Invalid map\n");

        vaddr = (vm_offset_t)buf;
        sg = &dm_segments[0];
        seg = 1;
        sg->ds_len = 0;
        nextpaddr = 0;

        do {
		bus_size_t      size;

                paddr = pmap_kextract(vaddr);
                size = PAGE_SIZE - (paddr & PAGE_MASK);
                if (size > buflen)
                        size = buflen;

                if (sg->ds_len == 0) {
                        sg->ds_addr = paddr;
                        sg->ds_len = size;
                } else if (paddr == nextpaddr) {
                        sg->ds_len += size;
                } else {
                        /* Go to the next segment */
                        sg++;
                        seg++;
                        if (seg > dmat->nsegments)
				break;
                        sg->ds_addr = paddr;
                        sg->ds_len = size;
                }
                vaddr += size;
                nextpaddr = paddr + size;
                buflen -= size;

	} while (buflen > 0);

	if (buflen != 0) {
		printf("bus_dmamap_load: Too many segs! buf_len = 0x%lx\n",
		    (u_long)buflen);
		error = EFBIG;
	}

	(*callback)(callback_arg, dm_segments, seg, error);

	return (0);
}
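
/*
 * Usage sketch (illustrative only): a typical single-segment load callback
 * just records segs[0].ds_addr, or leaves its result untouched on error.
 * The names are hypothetical; the arguments match the
 * (*callback)(callback_arg, dm_segments, seg, error) invocation above.
 *
 *	static void
 *	foo_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *
 *		if (error == 0)
 *			*(bus_addr_t *)arg = segs[0].ds_addr;
 *	}
 *
 *	error = bus_dmamap_load(sc->buf_dmat, sc->buf_map, sc->buf,
 *	    sc->buf_size, foo_dmamap_cb, &sc->buf_paddr, 0);
 */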

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
    void *buf, bus_size_t buflen, struct thread *td,
    int flags, vm_offset_t *lastaddrp, int *segp,
    int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	pmap_t pmap;

	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = NULL;

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		if (pmap)
			curaddr = pmap_extract(pmap, vaddr);
		else
			curaddr = pmap_kextract(vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}

/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs = 0, error = 0;

	M_ASSERTPKTHDR(m0);

	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat,
				    dm_segments, m->m_data, m->m_len, NULL,
				    flags, &lastaddr, &nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs+1,
		    m0->m_pkthdr.len, error);
	}
	return (error);
}
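
/*
 * Usage sketch (illustrative only): the mbuf and uio loaders take a
 * bus_dmamap_callback2_t, which is also handed the total mapped length
 * (m0->m_pkthdr.len above).  All foo_* names are hypothetical.
 *
 *	static void
 *	foo_txmap_cb(void *arg, bus_dma_segment_t *segs, int nseg,
 *	    bus_size_t mapsize, int error)
 *	{
 *		struct foo_txdesc *txd = arg;
 *
 *		if (error == 0)
 *			foo_encap_segs(txd, segs, nseg, mapsize);
 *	}
 *
 *	error = bus_dmamap_load_mbuf(sc->tx_dmat, txd->map, m0,
 *	    foo_txmap_cb, txd, 0);
 */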

/*
 * Like bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
	vm_offset_t lastaddr;
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, i, error, first;
	bus_size_t resid;
	struct iovec *iov;
	struct thread *td = NULL;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		td = uio->uio_td;
		KASSERT(td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
	}

	first = 1;
	nsegs = error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
		    resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = bus_dmamap_load_buffer(dmat, dm_segments, addr,
			    minlen, td, flags, &lastaddr, &nsegs, first);

			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs+1,
		    uio->uio_resid, error);
	}

	return (error);
}

/*
 * Release the mapping held by map. A no-op on PowerPC.
 */
void
bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	return;
}

void
bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{

	return;
}