busdma_machdep-v4.c revision 136743
162590Sitojun/*
2122615Sume * Copyright (c) 2004 Olivier Houchard
362590Sitojun * Copyright (c) 2002 Peter Grehan
455505Sshin * Copyright (c) 1997, 1998 Justin T. Gibbs.
555505Sshin * All rights reserved.
655505Sshin *
755505Sshin * Redistribution and use in source and binary forms, with or without
855505Sshin * modification, are permitted provided that the following conditions
955505Sshin * are met:
1055505Sshin * 1. Redistributions of source code must retain the above copyright
1155505Sshin *    notice, this list of conditions, and the following disclaimer,
1255505Sshin *    without modification, immediately at the beginning of the file.
1355505Sshin * 2. The name of the author may not be used to endorse or promote products
1455505Sshin *    derived from this software without specific prior written permission.
1555505Sshin *
1655505Sshin * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
1755505Sshin * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1855505Sshin * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1955505Sshin * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
2055505Sshin * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
2155505Sshin * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2255505Sshin * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2355505Sshin * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2455505Sshin * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2555505Sshin * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2655505Sshin * SUCH DAMAGE.
2755505Sshin *
2855505Sshin *   From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
2955505Sshin */
3055505Sshin
3155505Sshin#include <sys/cdefs.h>
3255505Sshin__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep.c 136743 2004-10-21 11:59:33Z cognet $");
3355505Sshin
/*
 * ARM bus dma support routines
 */
3755505Sshin
3855505Sshin#define _ARM32_BUS_DMA_PRIVATE
3955505Sshin#include <sys/param.h>
4055505Sshin#include <sys/systm.h>
4155505Sshin#include <sys/malloc.h>
4255505Sshin#include <sys/bus.h>
4355505Sshin#include <sys/interrupt.h>
4455505Sshin#include <sys/lock.h>
4555505Sshin#include <sys/proc.h>
4655505Sshin#include <sys/mutex.h>
4755505Sshin#include <sys/mbuf.h>
4855505Sshin#include <sys/uio.h>
4955505Sshin
5055505Sshin#include <vm/vm.h>
5155505Sshin#include <vm/vm_page.h>
5255505Sshin#include <vm/vm_map.h>
5355505Sshin
5455505Sshin#include <machine/atomic.h>
5555505Sshin#include <machine/bus.h>
5655505Sshin#include <machine/cpufunc.h>
5755505Sshin
/*
 * Software state of a DMA tag: the constraints supplied at creation time
 * (possibly merged with those of a parent tag) plus bookkeeping.
 */
struct bus_dma_tag {
	bus_dma_tag_t		parent;		/* tag we inherit constraints from, or NULL */
	bus_size_t		alignment;	/* alignment handed to contigmalloc() */
	bus_size_t		boundary;	/* boundary a segment may not cross (0 = none) */
	bus_addr_t		lowaddr;	/* upper physical-address bound for allocations */
	bus_addr_t		highaddr;	/* merged with parent's; not otherwise used here */
	bus_dma_filter_t	*filter;	/* optional filter; inherited from parent if NULL */
	void			*filterarg;	/* argument passed along with filter */
	bus_size_t		maxsize;	/* max total size of one mapping */
	u_int			nsegments;	/* max number of S/G segments */
	bus_size_t		maxsegsz;	/* max size of a single segment */
	int			flags;		/* BUS_DMA_* flags from bus_dma_tag_create() */
	int			ref_count;	/* self + references held by child tags */
	int			map_count;	/* maps currently outstanding on this tag */
	bus_dma_lock_t		*lockfunc;	/* driver lock callback (dflt_lock if none) */
	void			*lockfuncarg;	/* argument for lockfunc */
	/*
	 * DMA range for this tag.  If the page doesn't fall within
	 * one of these ranges, an error is returned.  The caller
	 * may then decide what to do with the transfer.  If the
	 * range pointer is NULL, it is ignored.
	 */
	struct arm32_dma_range	*ranges;
	int			_nranges;
};

/*
 * bus_dmamap flags: the low bits record what kind of buffer is currently
 * loaded (exactly one of the TYPE bits); DMAMAP_COHERENT means no cache
 * maintenance is needed and makes bus_dmamap_sync() a no-op.
 */
#define DMAMAP_LINEAR		0x1	/* buffer is a plain linear buffer */
#define DMAMAP_MBUF		0x2	/* buffer is an mbuf chain */
#define DMAMAP_UIO		0x4	/* buffer is a struct uio */
#define DMAMAP_TYPE_MASK	(DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)
#define DMAMAP_COHERENT		0x8	/* mapping needs no cache sync */

/* Software state of one mapping; remembers the loaded buffer for sync. */
struct bus_dmamap {
        bus_dma_tag_t	dmat;		/* tag this map was created on */
	int		flags;		/* DMAMAP_* flags above */
	void 		*buffer;	/* loaded buffer; type depends on flags */
	int		len;		/* buffer length (DMAMAP_LINEAR only) */
};
9555505Sshin
/*
 * Forward declaration of the common buffer loader used by the linear,
 * mbuf and uio load routines below (defined after the helpers).
 */
static int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct thread *td,
    int flags, vm_offset_t *lastaddrp, int *segp,
    int first);
10555505Sshin
10655505Sshinstatic __inline struct arm32_dma_range *
10755505Sshin_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
10855505Sshin    bus_addr_t curaddr)
10955505Sshin{
11055505Sshin	struct arm32_dma_range *dr;
11155505Sshin	int i;
112186119Sqingli
113186119Sqingli	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
114196866Sbz		if (curaddr >= dr->dr_sysbase &&
115186119Sqingli		    round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
116186119Sqingli			return (dr);
117100650Sjmallett	}
11862590Sitojun
11962590Sitojun	return (NULL);
12062590Sitojun}
12162590Sitojun/*
12262590Sitojun * Convenience function for manipulating driver locks from busdma (during
12355505Sshin * busdma_swi, for example).  Drivers that don't provide their own locks
124287097Shrs * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
125287097Shrs * non-mutex locking scheme don't have to use this at all.
12655505Sshin */
127265778Smelifarovoid
128287097Shrsbusdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
129287097Shrs{
130287097Shrs	struct mtx *dmtx;
131287097Shrs
132287097Shrs	dmtx = (struct mtx *)arg;
133173412Skevlo	switch (op) {
134173412Skevlo	case BUS_DMA_LOCK:
135287097Shrs		mtx_lock(dmtx);
136287097Shrs		break;
137287097Shrs	case BUS_DMA_UNLOCK:
138287097Shrs		mtx_unlock(dmtx);
139287097Shrs		break;
140287097Shrs	default:
141287097Shrs		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
142287097Shrs	}
143287097Shrs}
14462590Sitojun
/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
	/* Under INVARIANTS a deferral on a lock-less tag is fatal. */
	panic("driver error: busdma dflt_lock called");
#else
	/* Otherwise just complain loudly and continue. */
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}
16055505Sshin
161287097Shrs/*
162287097Shrs * Allocate a device specific dma_tag.
16355505Sshin */
16455505Sshin#define SEG_NB 1024
16555505Sshin
166122615Sumeint
167121156Sumebus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
16855505Sshin		   bus_size_t boundary, bus_addr_t lowaddr,
16955505Sshin		   bus_addr_t highaddr, bus_dma_filter_t *filter,
170122615Sume		   void *filterarg, bus_size_t maxsize, int nsegments,
171122615Sume		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
172122615Sume		   void *lockfuncarg, bus_dma_tag_t *dmat)
173122615Sume{
174122615Sume	bus_dma_tag_t newtag;
175122615Sume	int error = 0;
176122615Sume	/* Return a NULL tag on failure */
177122615Sume	*dmat = NULL;
178122615Sume
179122615Sume	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
180122615Sume	if (newtag == NULL)
181122615Sume		return (ENOMEM);
182122615Sume
18355505Sshin	newtag->parent = parent;
184122615Sume	newtag->alignment = alignment;
185265778Smelifaro	newtag->boundary = boundary;
186268827Speter	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
187265778Smelifaro	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
188122615Sume	newtag->filter = filter;
18955505Sshin	newtag->filterarg = filterarg;
190122615Sume        newtag->maxsize = maxsize;
191122615Sume        newtag->nsegments = nsegments;
192122615Sume	newtag->maxsegsz = maxsegsz;
193122615Sume	newtag->flags = flags;
194122615Sume	newtag->ref_count = 1; /* Count ourself */
19555505Sshin	newtag->map_count = 0;
19655505Sshin	newtag->ranges = bus_dma_get_range();
19755505Sshin	newtag->_nranges = bus_dma_get_range_nb();
19855505Sshin	if (lockfunc != NULL) {
19955505Sshin		newtag->lockfunc = lockfunc;
20055505Sshin		newtag->lockfuncarg = lockfuncarg;
20155505Sshin	} else {
202122615Sume		newtag->lockfunc = dflt_lock;
203122615Sume		newtag->lockfuncarg = NULL;
204122615Sume	}
205122615Sume        /*
206122615Sume	 * Take into account any restrictions imposed by our parent tag
20755505Sshin	 */
208122615Sume        if (parent != NULL) {
20955505Sshin                newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
210122615Sume                newtag->highaddr = max(parent->highaddr, newtag->highaddr);
211122615Sume		if (newtag->boundary == 0)
21255505Sshin			newtag->boundary = parent->boundary;
21355505Sshin		else if (parent->boundary != 0)
21455505Sshin                	newtag->boundary = min(parent->boundary,
21555505Sshin					       newtag->boundary);
21655505Sshin                if (newtag->filter == NULL) {
21755505Sshin                        /*
21855505Sshin                         * Short circuit looking at our parent directly
21955505Sshin                         * since we have encapsulated all of its information
220122615Sume                         */
221122615Sume                        newtag->filter = parent->filter;
222122615Sume                        newtag->filterarg = parent->filterarg;
223122615Sume                        newtag->parent = parent->parent;
22455505Sshin		}
225122615Sume		if (newtag->parent != NULL)
226122615Sume			atomic_add_int(&parent->ref_count, 1);
227122615Sume	}
228122615Sume
229122615Sume	*dmat = newtag;
230122615Sume	return (error);
231122615Sume}
232122615Sume
233122615Sumeint
234122615Sumebus_dma_tag_destroy(bus_dma_tag_t dmat)
235122615Sume{
236122615Sume	if (dmat != NULL) {
237122615Sume
238122615Sume                if (dmat->map_count != 0)
239122615Sume                        return (EBUSY);
240122615Sume
241122615Sume                while (dmat != NULL) {
242122615Sume                        bus_dma_tag_t parent;
243122615Sume
244122615Sume                        parent = dmat->parent;
245122615Sume                        atomic_subtract_int(&dmat->ref_count, 1);
246122615Sume                        if (dmat->ref_count == 0) {
247122615Sume                                free(dmat, M_DEVBUF);
248122615Sume                                /*
249122615Sume                                 * Last reference count, so
250122615Sume                                 * release our reference
251122615Sume                                 * count on our parent.
252122615Sume                                 */
253122615Sume                                dmat = parent;
254122615Sume                        } else
255122615Sume                                dmat = NULL;
256122615Sume                }
257122615Sume        }
258122615Sume        return (0);
25955505Sshin}
260122615Sume
261122615Sume/*
262122615Sume * Allocate a handle for mapping from kva/uva/physical
263122615Sume * address space into bus device space.
264122615Sume */
265122615Sumeint
266122615Sumebus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
267122615Sume{
268122615Sume	bus_dmamap_t newmap;
26955505Sshin
270122615Sume	newmap = malloc(sizeof(*newmap), M_DEVBUF, M_NOWAIT | M_ZERO);
271122615Sume	if (newmap == NULL)
27255505Sshin		return (ENOMEM);
27355505Sshin	*mapp = newmap;
27455505Sshin	newmap->dmat = dmat;
275122615Sume	newmap->flags = 0;
276122615Sume	dmat->map_count++;
277122615Sume
278122615Sume	return (0);
279122615Sume}
28055505Sshin
281122615Sume/*
282122615Sume * Destroy a handle for mapping from kva/uva/physical
283122615Sume * address space into bus device space.
284122615Sume */
285122615Sumeint
286122615Sumebus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
28755505Sshin{
288122615Sume
289122615Sume	free(map, M_DEVBUF);
290122615Sume        dmat->map_count--;
291122615Sume        return (0);
292122615Sume}
293122615Sume
29455505Sshin/*
295122615Sume * Allocate a piece of memory that can be efficiently mapped into
296122615Sume * bus device space based on the constraints lited in the dma tag.
297122615Sume * A dmamap to for use with dmamap_load is also allocated.
298122615Sume */
299122615Sumeint
300122615Sumebus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
301122615Sume                 bus_dmamap_t *mapp)
302122615Sume{
30355505Sshin	bus_dmamap_t newmap = NULL;
30455505Sshin
30555505Sshin	int mflags;
30655505Sshin
30755505Sshin	if (flags & BUS_DMA_NOWAIT)
30855505Sshin		mflags = M_NOWAIT;
30955505Sshin	else
310265778Smelifaro		mflags = M_WAITOK;
311259169Sae	if (flags & BUS_DMA_ZERO)
31255505Sshin		mflags |= M_ZERO;
31355505Sshin
31455505Sshin	if (!*mapp) {
315265778Smelifaro		newmap = malloc(sizeof(*newmap), M_DEVBUF, M_NOWAIT | M_ZERO);
31655505Sshin		if (newmap == NULL)
317265778Smelifaro			return (ENOMEM);
318265778Smelifaro		dmat->map_count++;
31955505Sshin		newmap->flags = 0;
32055505Sshin		*mapp = newmap;
32155505Sshin		newmap->dmat = dmat;
32255505Sshin	}
32355505Sshin
32455505Sshin        if (dmat->maxsize <= PAGE_SIZE) {
325167260Skevlo                *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
326265778Smelifaro        } else {
327265778Smelifaro                /*
328265778Smelifaro                 * XXX Use Contigmalloc until it is merged into this facility
329265778Smelifaro                 *     and handles multi-seg allocations.  Nobody is doing
330265778Smelifaro                 *     multi-seg allocations yet though.
331122615Sume                 */
332122615Sume                *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
33355505Sshin                    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
334265778Smelifaro                    dmat->boundary);
33555505Sshin        }
33655505Sshin        if (*vaddr == NULL && newmap != NULL) {
33755505Sshin		free(newmap, M_DEVBUF);
33855505Sshin		dmat->map_count--;
33955505Sshin		*mapp = NULL;
34055505Sshin                return (ENOMEM);
34155505Sshin	}
34255505Sshin        return (0);
34355505Sshin}
34455505Sshin
345287097Shrs/*
34655505Sshin * Free a piece of memory and it's allocated dmamap, that was allocated
34755505Sshin * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
34855505Sshin */
34955505Sshinvoid
35055505Sshinbus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
351121156Sume{
352121156Sume        if (dmat->maxsize <= PAGE_SIZE)
35355505Sshin		free(vaddr, M_DEVBUF);
35455505Sshin        else {
35555505Sshin		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
35655505Sshin	}
357287097Shrs	dmat->map_count--;
358287097Shrs	free(map, M_DEVBUF);
359287097Shrs}
360287097Shrs
361287097Shrs/*
362287097Shrs * Map the buffer buf into bus space using the dmamap map.
363287097Shrs */
364287097Shrsint
365287097Shrsbus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
366287097Shrs                bus_size_t buflen, bus_dmamap_callback_t *callback,
367287097Shrs                void *callback_arg, int flags)
368287097Shrs{
369287097Shrs     	vm_offset_t	lastaddr = 0;
370287097Shrs	int		error, nsegs = 0;
371287097Shrs#ifdef __GNUC__
372287097Shrs	bus_dma_segment_t dm_segments[dmat->nsegments];
373287097Shrs#else
37455505Sshin	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
37555505Sshin#endif
376287097Shrs
37755505Sshin	map->flags &= ~DMAMAP_TYPE_MASK;
37855505Sshin	map->flags |= DMAMAP_LINEAR|DMAMAP_COHERENT;
37955505Sshin	map->buffer = buf;
38055505Sshin	map->len = buflen;
381287097Shrs	error = bus_dmamap_load_buffer(dmat,
382259169Sae	    dm_segments, map, buf, buflen, NULL,
38355505Sshin	    flags, &lastaddr, &nsegs, 1);
38455505Sshin	if (error)
38555505Sshin		(*callback)(callback_arg, NULL, 0, error);
38655505Sshin	else
38755505Sshin		(*callback)(callback_arg, dm_segments, nsegs + 1, error);
38855505Sshin
38955505Sshin	return (0);
39055505Sshin}
39155505Sshin
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 *
 * Walks the buffer a page-chunk at a time, resolving each virtual
 * address to a physical one, translating it into the tag's DMA window
 * when ranges are configured, and coalescing adjacent chunks into
 * segs[].  Returns 0, EINVAL (address outside all DMA ranges) or
 * EFBIG (ran out of segments before consuming buflen).
 */
static int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct thread *td,
    int flags, vm_offset_t *lastaddrp, int *segp,
    int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	int error = 0;
	pmap_t pmap;
	pd_entry_t *pde;
	pt_entry_t pte;
	pt_entry_t *ptep;

	/* A thread is supplied for user-space buffers; else use the kernel pmap. */
	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = pmap_kernel();

	lastaddr = *lastaddrp;
	/* bmask is only consulted when dmat->boundary != 0 (see below). */
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 *
		 * For kernel addresses we walk the page tables ourselves so
		 * we can also inspect the cacheability bits: if any chunk is
		 * mapped cacheable, the map loses DMAMAP_COHERENT and will
		 * need cache maintenance in bus_dmamap_sync().
		 *
		 * XXX Don't support checking for coherent mappings
		 * XXX in user address space.
		 */
		if (__predict_true(pmap == pmap_kernel())) {
			(void) pmap_get_pde_pte(pmap, vaddr, &pde, &ptep);
			if (__predict_false(pmap_pde_section(pde))) {
				/* 1MB L1 section mapping. */
				curaddr = (*pde & L1_S_FRAME) |
				    (vaddr & L1_S_OFFSET);
				if (*pde & L1_S_CACHE_MASK) {
					map->flags &=
					    ~DMAMAP_COHERENT;
				}
			} else {
				pte = *ptep;
				KASSERT((pte & L2_TYPE_MASK) != L2_TYPE_INV,
				    ("INV type"));
				if (__predict_false((pte & L2_TYPE_MASK)
						    == L2_TYPE_L)) {
					/* 64KB L2 large page. */
					curaddr = (pte & L2_L_FRAME) |
					    (vaddr & L2_L_OFFSET);
					if (pte & L2_L_CACHE_MASK) {
						map->flags &=
						    ~DMAMAP_COHERENT;

					}
				} else {
					/* 4KB L2 small page. */
					curaddr = (pte & L2_S_FRAME) |
					    (vaddr & L2_S_OFFSET);
					if (pte & L2_S_CACHE_MASK) {
						map->flags &=
						    ~DMAMAP_COHERENT;
					}
				}
			}
		} else {
			/* User address: no coherency checking, assume cached. */
			curaddr = pmap_extract(pmap, vaddr);
			map->flags &= ~DMAMAP_COHERENT;
		}

		if (dmat->ranges) {
			struct arm32_dma_range *dr;

			dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
			    curaddr);
			if (dr == NULL)
				return (EINVAL);
			/*
		     	 * In a valid DMA range.  Translate the physical
			 * memory address to an address in the DMA window.
			 */
			curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;

		}
		/*
		 * Compute the segment size, and adjust counts.
		 * Never cross a page boundary in one chunk.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			/*
			 * Coalesce only if physically contiguous, within
			 * maxsegsz, and not straddling a boundary.
			 */
			if (curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask))) {
				segs[seg].ds_len += sgsize;
				goto segdone;
			}
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		/* Never taken: error is not set inside this loop. */
		if (error)
			break;
segdone:
		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		error = EFBIG; /* XXX better return value here? */
	return (error);
}
53855505Sshin
539243903Shrs/*
540243903Shrs * Like bus_dmamap_load(), but for mbufs.
541121156Sume */
542121156Sumeint
54355505Sshinbus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
54455505Sshin		     bus_dmamap_callback2_t *callback, void *callback_arg,
54555505Sshin		     int flags)
54655505Sshin{
54755505Sshin#ifdef __GNUC__
54855505Sshin	bus_dma_segment_t dm_segments[dmat->nsegments];
549122615Sume#else
55078064Sume	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
55178064Sume#endif
55278064Sume	int nsegs = 0, error = 0;
55355505Sshin
55455505Sshin	M_ASSERTPKTHDR(m0);
55555505Sshin
556287097Shrs	map->flags &= ~DMAMAP_TYPE_MASK;
557259176Sae	map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
55855505Sshin	map->buffer = m0;
55955505Sshin	if (m0->m_pkthdr.len <= dmat->maxsize) {
56055505Sshin		int first = 1;
56162590Sitojun		vm_offset_t lastaddr = 0;
56255505Sshin		struct mbuf *m;
56355505Sshin
56455505Sshin		for (m = m0; m != NULL && error == 0; m = m->m_next) {
56555505Sshin			if (m->m_len > 0) {
566253999Shrs				error = bus_dmamap_load_buffer(dmat,
567292333Smelifaro				    dm_segments, map, m->m_data, m->m_len, NULL,
56855505Sshin				    flags, &lastaddr, &nsegs, first);
56978064Sume				first = 0;
57078064Sume			}
57162590Sitojun		}
57278064Sume	} else {
57355505Sshin		error = EINVAL;
57455505Sshin	}
57566865Ssumikawa
576122615Sume	if (error) {
57778064Sume		/*
578122615Sume		 * force "no valid mappings" on error in callback.
57955505Sshin		 */
58055505Sshin		(*callback)(callback_arg, dm_segments, 0, 0, error);
58155505Sshin	} else {
58255505Sshin		(*callback)(callback_arg, dm_segments, nsegs + 1,
58355505Sshin		    m0->m_pkthdr.len, error);
58455505Sshin	}
58555505Sshin	return (error);
586186119Sqingli}
58755505Sshin
588186119Sqingli/*
589186119Sqingli * Like bus_dmamap_load(), but for uios.
590186119Sqingli */
59155505Sshinint
59255505Sshinbus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
59355505Sshin    bus_dmamap_callback2_t *callback, void *callback_arg,
59455505Sshin    int flags)
595121156Sume{
59655505Sshin	vm_offset_t lastaddr;
59755505Sshin#ifdef __GNUC__
59855505Sshin	bus_dma_segment_t dm_segments[dmat->nsegments];
59955505Sshin#else
60055505Sshin	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
60155505Sshin#endif
60255505Sshin	int nsegs, i, error, first;
60355505Sshin	bus_size_t resid;
60455505Sshin	struct iovec *iov;
60555505Sshin	struct thread *td = NULL;
60655505Sshin
607259171Sae	resid = uio->uio_resid;
60878064Sume	iov = uio->uio_iov;
60978064Sume	map->flags &= ~DMAMAP_TYPE_MASK;
61078064Sume	map->flags |= DMAMAP_UIO|DMAMAP_COHERENT;
61178064Sume	map->buffer = uio;
61278064Sume
61378064Sume	if (uio->uio_segflg == UIO_USERSPACE) {
61478064Sume		td = uio->uio_td;
61578064Sume		KASSERT(td != NULL,
616121156Sume		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
61778064Sume	}
61878064Sume
61978064Sume	first = 1;
62078064Sume	nsegs = error = 0;
62178064Sume	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
62278064Sume		/*
62378064Sume		 * Now at the first iovec to load.  Load each iovec
62478064Sume		 * until we have exhausted the residual count.
625122615Sume		 */
626122615Sume		bus_size_t minlen =
627122615Sume		    resid < iov[i].iov_len ? resid : iov[i].iov_len;
62855505Sshin		caddr_t addr = (caddr_t) iov[i].iov_base;
629259176Sae
630259176Sae		if (minlen > 0) {
631259176Sae			error = bus_dmamap_load_buffer(dmat, dm_segments, map,
63255505Sshin			    addr, minlen, td, flags, &lastaddr, &nsegs, first);
63355505Sshin
63455505Sshin			first = 0;
63555505Sshin
63655505Sshin			resid -= minlen;
63755505Sshin		}
63855505Sshin	}
63955505Sshin
64055505Sshin	if (error) {
64155505Sshin		/*
64255505Sshin		 * force "no valid mappings" on error in callback.
643121156Sume		 */
644122615Sume		(*callback)(callback_arg, dm_segments, 0, 0, error);
64581366Ssumikawa	} else {
64681366Ssumikawa		(*callback)(callback_arg, dm_segments, nsegs+1,
64781366Ssumikawa		    uio->uio_resid, error);
648122615Sume	}
649122615Sume
650122615Sume	return (error);
65181366Ssumikawa}
652288297Smelifaro
653288297Smelifaro/*
65466865Ssumikawa * Release the mapping held by map.
65581366Ssumikawa */
65666865Ssumikawavoid
65766865Ssumikawabus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
658253999Shrs{
65955505Sshin	map->flags &= ~DMAMAP_TYPE_MASK;
660253970Shrs	return;
66155505Sshin}
66278064Sume
/*
 * Perform the cache maintenance required by op for the len bytes at buf.
 * Called once per constituent buffer by bus_dmamap_sync() below.
 */
static void
bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
{

	/*
	 * POSTREAD (and the combined PREREAD|PREWRITE case) get a full
	 * write-back + invalidate and nothing else.
	 */
	if (op & BUS_DMASYNC_POSTREAD ||
	    op == (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) {
		cpu_dcache_wbinv_range((vm_offset_t)buf, len);
		return;
	}
	if (op & BUS_DMASYNC_PREWRITE)
		cpu_dcache_wb_range((vm_offset_t)buf, len);
	if (op & BUS_DMASYNC_PREREAD) {
		/*
		 * Invalidate-only is safe only when both start and length
		 * are cache-line aligned; otherwise write back too —
		 * presumably to avoid discarding dirty data that shares a
		 * cache line with the buffer (NOTE(review): confirm intent).
		 */
		if ((((vm_offset_t)buf | len) & arm_dcache_align_mask) == 0)
 			cpu_dcache_inv_range((vm_offset_t)buf, len);
		else
			cpu_dcache_wbinv_range((vm_offset_t)buf, len);
	}
}
681292333Smelifaro
682292333Smelifarovoid
683292333Smelifarobus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
684292333Smelifaro{
685292333Smelifaro	struct mbuf *m;
68655505Sshin	struct uio *uio;
687292333Smelifaro	int resid;
688292333Smelifaro	struct iovec *iov;
689292333Smelifaro
690292333Smelifaro	if (op == BUS_DMASYNC_POSTWRITE)
69178064Sume		return;
692292333Smelifaro	if (map->flags & DMAMAP_COHERENT)
693292333Smelifaro		return;
694292333Smelifaro	switch(map->flags & DMAMAP_TYPE_MASK) {
69578064Sume	case DMAMAP_LINEAR:
696292333Smelifaro		bus_dmamap_sync_buf(map->buffer, map->len, op);
697292333Smelifaro		break;
698292333Smelifaro	case DMAMAP_MBUF:
699292333Smelifaro		m = map->buffer;
700292333Smelifaro		while (m) {
701292333Smelifaro			bus_dmamap_sync_buf(m->m_data, m->m_len, op);
702292333Smelifaro			m = m->m_next;
703292333Smelifaro		}
704292333Smelifaro		break;
705292333Smelifaro	case DMAMAP_UIO:
706292333Smelifaro		uio = map->buffer;
707292333Smelifaro		iov = uio->uio_iov;
708292333Smelifaro		resid = uio->uio_resid;
709292333Smelifaro		for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
710292333Smelifaro			bus_size_t minlen = resid < iov[i].iov_len ? resid :
711292333Smelifaro			    iov[i].iov_len;
712292333Smelifaro			if (minlen > 0) {
713292333Smelifaro				bus_dmamap_sync_buf(iov[i].iov_base, minlen,
71455505Sshin				    op);
71555505Sshin				resid -= minlen;
716292333Smelifaro			}
717292333Smelifaro		}
718292333Smelifaro		break;
71962590Sitojun	default:
72062590Sitojun		break;
72162590Sitojun	}
72262590Sitojun	cpu_drain_writebuf();
72362590Sitojun}
724121156Sume