busdma_machdep-v4.c revision 134934
/*
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 2002 Peter Grehan
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *   From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep.c 134934 2004-09-08 04:54:19Z scottl $");

/*
 * ARM bus dma support routines
 */

#define _ARM32_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>

struct bus_dma_tag {
	bus_dma_tag_t		parent;
	bus_size_t		alignment;
	bus_size_t		boundary;
	bus_addr_t		lowaddr;
	bus_addr_t		highaddr;
	bus_dma_filter_t	*filter;
	void			*filterarg;
	bus_size_t		maxsize;
	u_int			nsegments;
	bus_size_t		maxsegsz;
	int			flags;
	int			ref_count;
	int			map_count;
	bus_dma_lock_t		*lockfunc;
	void			*lockfuncarg;
	/*
	 * DMA range for this tag.  If the page doesn't fall within
	 * one of these ranges, an error is returned.  The caller
	 * may then decide what to do with the transfer.  If the
	 * range pointer is NULL, it is ignored.
	 */
	struct arm32_dma_range	*ranges;
	int			_nranges;
};

struct arm_seglist {
	bus_dma_segment_t		seg;
	SLIST_ENTRY(arm_seglist)	next;
};

#define MAX_SEGS 512
struct bus_dmamap {
	bus_dma_tag_t			dmat;
	int				flags;
	SLIST_HEAD(, arm_seglist)	seglist;
};

static int bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct thread *td,
    int flags, vm_offset_t *lastaddrp, int *segp,
    int first);

/*
 * Check to see if the specified page is in an allowed DMA range.
 */
static __inline struct arm32_dma_range *
_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	int i;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (curaddr >= dr->dr_sysbase &&
		    round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
			return (dr);
	}

	return (NULL);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should pass &Giant as dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

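/*
 * Illustrative sketch (not part of this file; the "foo" names are
 * assumptions for the example only): a driver that wants busdma to
 * synchronize deferred callbacks with its own mutex passes
 * busdma_lock_mutex together with that mutex when it creates its tag.
 *
 *	struct foo_softc {
 *		struct mtx	foo_mtx;
 *		bus_dma_tag_t	foo_dmat;
 *	};
 *
 *	error = bus_dma_tag_create(NULL, 1, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
 *	    NULL, NULL, MAXBSIZE, 1, MAXBSIZE, 0,
 *	    busdma_lock_mutex, &sc->foo_mtx, &sc->foo_dmat);
 */
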
/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
	panic("driver error: busdma dflt_lock called");
#else
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}

/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL)
		return (ENOMEM);

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	newtag->ranges = bus_dma_get_range();
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}

	/*
	 * Take into account any restrictions imposed by our parent tag.
	 */
	if (parent != NULL) {
		newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = max(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = min(parent->boundary,
					       newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information.
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	*dmat = newtag;
	return (error);
}

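/*
 * Worked example of the parent-restriction logic above (hypothetical tags,
 * not part of this file): a child tag can never be less restrictive than
 * its parent.  Creating a child with lowaddr = BUS_SPACE_MAXADDR under a
 * parent that was created with lowaddr = BUS_SPACE_MAXADDR_32BIT leaves the
 * child with the 32-bit limit, because bus_dma_tag_create() takes
 * min(parent->lowaddr, newtag->lowaddr); a non-zero boundary is likewise
 * combined with min(), and a missing filter is inherited from the parent.
 */
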
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}

static void
arm_dmamap_freesegs(bus_dmamap_t map)
{
	struct arm_seglist *seg = SLIST_FIRST(&map->seglist);

	while (seg) {
		struct arm_seglist *next;

		next = SLIST_NEXT(seg, next);
		SLIST_REMOVE_HEAD(&map->seglist, next);
		free(seg, M_DEVBUF);
		seg = next;
	}
}

static int
arm_dmamap_addseg(bus_dmamap_t map, vm_offset_t addr, vm_size_t size)
{
	struct arm_seglist *seg = malloc(sizeof(*seg), M_DEVBUF, M_NOWAIT);

	if (!seg)
		return (ENOMEM);
	seg->seg.ds_addr = addr;
	seg->seg.ds_len = size;
	SLIST_INSERT_HEAD(&map->seglist, seg, next);
	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap;

	newmap = malloc(sizeof(*newmap), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (newmap == NULL)
		return (ENOMEM);
	SLIST_INIT(&newmap->seglist);
	*mapp = newmap;
	dmat->map_count++;

	return (0);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	arm_dmamap_freesegs(map);
	free(map, M_DEVBUF);
	dmat->map_count--;
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
                 bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap;
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	newmap = malloc(sizeof(*newmap), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (newmap == NULL)
		return (ENOMEM);
	SLIST_INIT(&newmap->seglist);
	*mapp = newmap;
	if (dmat->maxsize <= PAGE_SIZE) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}

	if (*vaddr == NULL) {
		free(newmap, M_DEVBUF);
		*mapp = NULL;
		return (ENOMEM);
	}

	return (0);
}

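/*
 * Illustrative sketch (hypothetical names, not part of this file): a
 * typical consumer allocates DMA-safe memory and the accompanying map
 * from its tag, and later releases both through bus_dmamem_free() using
 * the same map.
 *
 *	void		*ring;
 *	bus_dmamap_t	ring_map;
 *
 *	error = bus_dmamem_alloc(sc->foo_dmat, &ring,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring_map);
 *	...
 *	bus_dmamem_free(sc->foo_dmat, ring, ring_map);
 */
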
/*
 * Free a piece of memory and its allocated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	if (map == NULL)
		panic("bus_dmamem_free: NULL dmamap freed\n");
	if (dmat->maxsize <= PAGE_SIZE)
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
	arm_dmamap_freesegs(map);
	free(map, M_DEVBUF);
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                bus_size_t buflen, bus_dmamap_callback_t *callback,
                void *callback_arg, int flags)
{
	vm_offset_t	lastaddr = 0;
	int		error, nsegs = 0;
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif

	error = bus_dmamap_load_buffer(dmat,
	    dm_segments, map, buf, buflen, NULL,
	    flags, &lastaddr, &nsegs, 1);
	if (error)
		(*callback)(callback_arg, dm_segments, 0, error);
	else
		(*callback)(callback_arg, dm_segments, nsegs + 1, error);

	return (0);
}

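/*
 * Illustrative sketch (hypothetical names, not part of this file): the
 * caller provides a callback that receives the segment list; in this
 * implementation the callback is invoked before bus_dmamap_load()
 * returns.
 *
 *	static void
 *	foo_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 *	{
 *		if (error == 0)
 *			*(bus_addr_t *)arg = segs[0].ds_addr;
 *	}
 *
 *	bus_addr_t physaddr;
 *
 *	error = bus_dmamap_load(sc->foo_dmat, ring_map, ring, size,
 *	    foo_load_cb, &physaddr, BUS_DMA_NOWAIT);
 */
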
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
static int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct thread *td,
    int flags, vm_offset_t *lastaddrp, int *segp,
    int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	int error = 0;
	pmap_t pmap;
	pd_entry_t *pde;
	pt_entry_t pte;
	pt_entry_t *ptep;

	if (td != NULL)
		pmap = vmspace_pmap(td->td_proc->p_vmspace);
	else
		pmap = pmap_kernel();

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Don't support checking for coherent mappings
		 * XXX in user address space.
		 */
		if (__predict_true(pmap == pmap_kernel())) {
			(void) pmap_get_pde_pte(pmap, vaddr, &pde, &ptep);
			if (__predict_false(pmap_pde_section(pde))) {
				curaddr = (*pde & L1_S_FRAME) |
				    (vaddr & L1_S_OFFSET);
				if (*pde & L1_S_CACHE_MASK) {
					map->flags &=
					    ~ARM32_DMAMAP_COHERENT;
				}
			} else {
				pte = *ptep;
				KASSERT((pte & L2_TYPE_MASK) != L2_TYPE_INV,
				    ("INV type"));
				if (__predict_false((pte & L2_TYPE_MASK)
						    == L2_TYPE_L)) {
					curaddr = (pte & L2_L_FRAME) |
					    (vaddr & L2_L_OFFSET);
					if (pte & L2_L_CACHE_MASK) {
						map->flags &=
						    ~ARM32_DMAMAP_COHERENT;
					}
				} else {
					curaddr = (pte & L2_S_FRAME) |
					    (vaddr & L2_S_OFFSET);
					if (pte & L2_S_CACHE_MASK) {
						map->flags &=
						    ~ARM32_DMAMAP_COHERENT;
					}
				}
			}
		} else {
			curaddr = pmap_extract(pmap, vaddr);
			map->flags &= ~ARM32_DMAMAP_COHERENT;
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		error = arm_dmamap_addseg(map,
		    (vm_offset_t)curaddr, sgsize);
		if (error)
			break;

		if (first) {
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
			    (dmat->boundary == 0 ||
			     (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
				segs[seg].ds_len += sgsize;
			else {
				if (++seg >= dmat->nsegments)
					break;
				segs[seg].ds_addr = curaddr;
				segs[seg].ds_len = sgsize;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		error = EFBIG; /* XXX better return value here? */
	return (error);
}

/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs = 0, error = 0;

	M_ASSERTPKTHDR(m0);

	if (m0->m_pkthdr.len <= dmat->maxsize) {
		int first = 1;
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat,
				    dm_segments, map, m->m_data, m->m_len, NULL,
				    flags, &lastaddr, &nsegs, first);
				first = 0;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs+1,
		    m0->m_pkthdr.len, error);
	}
	return (error);
}

/*
 * Like bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
	vm_offset_t lastaddr;
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, i, error, first;
	bus_size_t resid;
	struct iovec *iov;
	struct thread *td = NULL;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	if (uio->uio_segflg == UIO_USERSPACE) {
		td = uio->uio_td;
		KASSERT(td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
	}

	first = 1;
	nsegs = error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
		    resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = bus_dmamap_load_buffer(dmat, dm_segments, map,
			    addr, minlen, td, flags, &lastaddr, &nsegs, first);

			first = 0;

			resid -= minlen;
		}
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs+1,
		    uio->uio_resid, error);
	}

	return (error);
}

/*
 * Release the mapping held by map and free the segment list that was
 * built when it was loaded.
 */
void
bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	arm_dmamap_freesegs(map);
	return;
}

void
bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct arm_seglist *seg = SLIST_FIRST(&map->seglist);

	if (op != BUS_DMASYNC_PREREAD && op != BUS_DMASYNC_PREWRITE)
		return;
	/* Skip cache frobbing if mapping was COHERENT. */
	if (map->flags & ARM32_DMAMAP_COHERENT) {
		/* Drain the write buffer. */
		cpu_drain_writebuf();
		return;
	}
	while (seg) {
		cpu_dcache_wbinv_range(seg->seg.ds_addr, seg->seg.ds_len);
		seg = SLIST_NEXT(seg, next);
	}
}
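
/*
 * Illustrative sketch (hypothetical names, not part of this file): drivers
 * bracket device DMA with bus_dmamap_sync() calls.  In this implementation
 * the PRE operations write back and invalidate the data cache for
 * non-coherent mappings before the device touches memory; the POST
 * operations are currently no-ops here but are part of the driver contract.
 *
 *	bus_dmamap_sync(sc->foo_dmat, ring_map, BUS_DMASYNC_PREWRITE);
 *	... start the DMA transfer ...
 *	bus_dmamap_sync(sc->foo_dmat, ring_map, BUS_DMASYNC_POSTREAD);
 *	bus_dmamap_unload(sc->foo_dmat, ring_map);
 */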