/*-
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 2002 Peter Grehan
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *   From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep.c 140313 2005-01-15 19:42:28Z cognet $");

/*
 * ARM bus dma support routines
 */

#define _ARM32_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/ktr.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>

struct bus_dma_tag {
	bus_dma_tag_t		parent;
	bus_size_t		alignment;
	bus_size_t		boundary;
	bus_addr_t		lowaddr;
	bus_addr_t		highaddr;
	bus_dma_filter_t	*filter;
	void			*filterarg;
	bus_size_t		maxsize;
	u_int			nsegments;
	bus_size_t		maxsegsz;
	int			flags;
	int			ref_count;
	int			map_count;
	bus_dma_lock_t		*lockfunc;
	void			*lockfuncarg;
	/*
	 * DMA range for this tag.  If the page doesn't fall within
	 * one of these ranges, an error is returned.  The caller
	 * may then decide what to do with the transfer.  If the
	 * range pointer is NULL, it is ignored.
	 */
	struct arm32_dma_range	*ranges;
	int			_nranges;
};

#define DMAMAP_LINEAR		0x1
#define DMAMAP_MBUF		0x2
#define DMAMAP_UIO		0x4
#define DMAMAP_TYPE_MASK	(DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)
#define DMAMAP_COHERENT		0x8
struct bus_dmamap {
	bus_dma_tag_t	dmat;
	int		flags;
	void		*buffer;
	int		len;
};

static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp);

/*
 * Check to see if the specified page is in an allowed DMA range.
 */
static __inline struct arm32_dma_range *
_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	int i;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (curaddr >= dr->dr_sysbase &&
		    round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
			return (dr);
	}

	return (NULL);
}
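
/*
 * Example (illustrative only): a platform with 64MB of RAM at physical
 * address 0xc0000000 that the bus sees at bus address 0 might publish,
 * through the bus_dma_get_range() hook used below, a table like:
 *
 *	static struct arm32_dma_range foo_dma_ranges[] = {
 *		{ .dr_sysbase = 0xc0000000,
 *		  .dr_busbase = 0x00000000,
 *		  .dr_len     = 64 * 1024 * 1024 },
 *	};
 *
 * The names and values are hypothetical.  _bus_dma_inrange() accepts any
 * CPU physical address inside such a window, and the load path below
 * rebases it: busaddr = (curaddr - dr_sysbase) + dr_busbase.
 */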

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant as dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.  See the example
 * following the function body.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}
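
/*
 * Example (illustrative sketch, not part of this file): a driver that
 * wants its deferred bus_dmamap_load() callbacks serialized by its own
 * mutex can pass busdma_lock_mutex and that mutex when creating its tag.
 * The softc "sc" and its fields are hypothetical names.
 */
#if 0
	error = bus_dma_tag_create(NULL, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES, 1, MCLBYTES, 0,
	    busdma_lock_mutex, &sc->foo_mtx, &sc->foo_dmat);
#endif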

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
	panic("driver error: busdma dflt_lock called");
#else
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}

/*
 * Allocate a device specific dma_tag.
 */
#define SEG_NB 1024

int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL) {
		CTR3(KTR_BUSDMA, "bus_dma_tag_create returned tag %p tag "
		    "flags 0x%x error %d", newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	newtag->ranges = bus_dma_get_range();
	newtag->_nranges = bus_dma_get_range_nb();
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	/*
	 * Take into account any restrictions imposed by our parent tag,
	 * so that a child tag is never less restricted than its parent:
	 * e.g. a child created with a lowaddr above its parent's still
	 * inherits the parent's stricter (smaller) lowaddr limit.
	 */
	if (parent != NULL) {
		newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = max(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = min(parent->boundary,
					       newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	*dmat = newtag;
	CTR3(KTR_BUSDMA, "bus_dma_tag_create returned tag %p tag flags 0x%x "
	    "error %d", newtag, (newtag != NULL ? newtag->flags : 0), error);

	return (error);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	bus_dma_tag_t dmat_copy = dmat;

	if (dmat != NULL) {
		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	CTR1(KTR_BUSDMA, "bus_dma_tag_destroy tag %p", dmat_copy);

	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap;
	int error = 0;

	newmap = malloc(sizeof(*newmap), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (newmap == NULL) {
		CTR2(KTR_BUSDMA, "bus_dmamap_create: tag %p error %d",
		    dmat, ENOMEM);
		return (ENOMEM);
	}
	*mapp = newmap;
	newmap->dmat = dmat;
	newmap->flags = 0;
	dmat->map_count++;

	CTR3(KTR_BUSDMA, "bus_dmamap_create: tag %p tag flags 0x%x error %d",
	    dmat, dmat->flags, error);

	return (0);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	free(map, M_DEVBUF);
	dmat->map_count--;
	CTR1(KTR_BUSDMA, "bus_dmamap_destroy: tag %p error 0", dmat);
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
                 bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap = NULL;
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	if (!*mapp) {
		newmap = malloc(sizeof(*newmap), M_DEVBUF, M_NOWAIT | M_ZERO);
		if (newmap == NULL) {
			CTR3(KTR_BUSDMA, "bus_dmamem_alloc: tag %p tag "
			    "flags 0x%x error %d", dmat, dmat->flags, ENOMEM);
			return (ENOMEM);
		}
		dmat->map_count++;
		newmap->flags = 0;
		*mapp = newmap;
		newmap->dmat = dmat;
	}

	if (dmat->maxsize <= PAGE_SIZE) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL) {
		if (newmap != NULL) {
			free(newmap, M_DEVBUF);
			dmat->map_count--;
		}
		*mapp = NULL;
		return (ENOMEM);
	}
	return (0);
}
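
/*
 * Example (illustrative sketch): the usual lifecycle of a DMA-safe
 * allocation such as a descriptor ring.  The softc "sc" and its fields
 * are hypothetical names; sc->ring_map must be NULL on entry so that
 * bus_dmamem_alloc() allocates the map as well.
 */
#if 0
	if (bus_dmamem_alloc(sc->ring_dmat, (void **)&sc->ring,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->ring_map) != 0)
		return (ENOMEM);
	/* ... bus_dmamap_load() the ring, do I/O, bus_dmamap_unload() ... */
	bus_dmamem_free(sc->ring_dmat, sc->ring, sc->ring_map);
#endif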

/*
 * Free a piece of memory and its associated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same free/contigfree choice that
 * bus_dmamem_alloc made between malloc and contigmalloc.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	if (dmat->maxsize <= PAGE_SIZE)
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
	dmat->map_count--;
	free(map, M_DEVBUF);
	CTR2(KTR_BUSDMA, "bus_dmamem_free: tag %p flags 0x%x", dmat,
	    dmat->flags);
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                bus_size_t buflen, bus_dmamap_callback_t *callback,
                void *callback_arg, int flags)
{
	vm_offset_t	lastaddr = 0;
	int		error, nsegs = -1;
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif

	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_LINEAR|DMAMAP_COHERENT;
	map->buffer = buf;
	map->len = buflen;
	error = bus_dmamap_load_buffer(dmat,
	    dm_segments, map, buf, buflen, kernel_pmap,
	    flags, &lastaddr, &nsegs);
	if (error)
		(*callback)(callback_arg, NULL, 0, error);
	else
		(*callback)(callback_arg, dm_segments, nsegs + 1, error);

	CTR4(KTR_BUSDMA, "bus_dmamap_load: tag %p tag flags 0x%x error %d "
	    "nsegs %d", dmat, dmat->flags, error, nsegs + 1);

	return (0);
}
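
/*
 * Example (illustrative sketch): a minimal bus_dmamap_callback_t that
 * records the single segment of a loaded buffer.  The names are
 * hypothetical.
 */
#if 0
static void
foo_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddrp = arg;

	if (error != 0)
		return;
	KASSERT(nseg == 1, ("foo_dmamap_cb: %d segments", nseg));
	*paddrp = segs[0].ds_addr;
}
#endif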

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * On the first invocation the caller passes *segp as -1 and
 * *lastaddrp as 0; both are updated here for later calls.
 */
static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	int error = 0;
	pd_entry_t *pde;
	pt_entry_t pte;
	pt_entry_t *ptep;

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
	    "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);

	for (seg = *segp; buflen > 0; ) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Don't support checking for coherent mappings
		 * XXX in user address space.
		 */
		if (__predict_true(pmap == pmap_kernel())) {
			(void) pmap_get_pde_pte(pmap, vaddr, &pde, &ptep);
			if (__predict_false(pmap_pde_section(pde))) {
				curaddr = (*pde & L1_S_FRAME) |
				    (vaddr & L1_S_OFFSET);
				if (*pde & L1_S_CACHE_MASK) {
					map->flags &=
					    ~DMAMAP_COHERENT;
				}
			} else {
				pte = *ptep;
				KASSERT((pte & L2_TYPE_MASK) != L2_TYPE_INV,
				    ("INV type"));
				if (__predict_false((pte & L2_TYPE_MASK)
						    == L2_TYPE_L)) {
					curaddr = (pte & L2_L_FRAME) |
					    (vaddr & L2_L_OFFSET);
					if (pte & L2_L_CACHE_MASK) {
						map->flags &=
						    ~DMAMAP_COHERENT;
					}
				} else {
					curaddr = (pte & L2_S_FRAME) |
					    (vaddr & L2_S_OFFSET);
					if (pte & L2_S_CACHE_MASK) {
						map->flags &=
						    ~DMAMAP_COHERENT;
					}
				}
			}
		} else {
			curaddr = pmap_extract(pmap, vaddr);
			map->flags &= ~DMAMAP_COHERENT;
		}

		if (dmat->ranges) {
			struct arm32_dma_range *dr;

			dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
			    curaddr);
			if (dr == NULL)
				return (EINVAL);
			/*
			 * In a valid DMA range.  Translate the physical
			 * memory address to an address in the DMA window.
			 */
			curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
		}

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (seg >= 0 && curaddr == lastaddr &&
		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
		    (dmat->boundary == 0 ||
		     (segs[seg].ds_addr & bmask) ==
		     (curaddr & bmask))) {
			segs[seg].ds_len += sgsize;
			goto segdone;
		} else {
			if (++seg >= dmat->nsegments)
				break;
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
		if (error)
			break;
segdone:
		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		error = EFBIG; /* XXX better return value here? */
	return (error);
}
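
/*
 * Worked example of the segment-size math above (illustrative numbers,
 * 4KB pages): for curaddr 0x12345680, sgsize starts as the rest of the
 * page, 0x1000 - 0x680 = 0x980 bytes.  With a 0x400 boundary,
 * baddr = (0x12345680 + 0x400) & ~0x3ff = 0x12345800, so sgsize is
 * clipped to 0x180 bytes: a segment never straddles a boundary line,
 * and the next chunk starts exactly at 0x12345800.
 */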

/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs = -1, error = 0;

	M_ASSERTPKTHDR(m0);

	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
	map->buffer = m0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0)
				error = bus_dmamap_load_buffer(dmat,
				    dm_segments, map, m->m_data, m->m_len,
				    pmap_kernel(), flags, &lastaddr, &nsegs);
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs + 1,
		    m0->m_pkthdr.len, error);
	}
	CTR4(KTR_BUSDMA, "bus_dmamap_load_mbuf: tag %p tag flags 0x%x "
	    "error %d nsegs %d", dmat, dmat->flags, error, nsegs + 1);

	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			int flags)
{
	int error = 0;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	*nsegs = -1;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat, segs, map,
						m->m_data, m->m_len,
						pmap_kernel(), flags, &lastaddr,
						nsegs);
			}
		}
	} else {
		error = EINVAL;
	}

	/* XXX FIXME: Having to increment nsegs is really annoying */
	++*nsegs;
	CTR4(KTR_BUSDMA, "bus_dmamap_load_mbuf_sg: tag %p tag flags 0x%x "
	    "error %d nsegs %d", dmat, dmat->flags, error, *nsegs);
	return (error);
}
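
/*
 * Example (illustrative sketch): a transmit path using the _sg variant
 * with a caller-provided segment array.  The names and segment count
 * are hypothetical.
 */
#if 0
	bus_dma_segment_t segs[FOO_MAX_TX_SEGS];
	int error, nsegs;

	error = bus_dmamap_load_mbuf_sg(sc->tx_dmat, txd->map, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	if (error == 0) {
		/* Fill nsegs descriptors from segs[i].ds_addr / ds_len. */
		bus_dmamap_sync(sc->tx_dmat, txd->map, BUS_DMASYNC_PREWRITE);
	}
#endif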

/*
 * Like bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
	vm_offset_t lastaddr = 0;
#ifdef __GNUC__
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, i, error;
	bus_size_t resid;
	struct iovec *iov;
	struct pmap *pmap;

	resid = uio->uio_resid;
	iov = uio->uio_iov;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_UIO|DMAMAP_COHERENT;
	map->buffer = uio;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = kernel_pmap;

	error = 0;
	nsegs = -1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
		    resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = bus_dmamap_load_buffer(dmat, dm_segments, map,
			    addr, minlen, pmap, flags, &lastaddr, &nsegs);
			resid -= minlen;
		}
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs + 1,
		    uio->uio_resid, error);
	}

	CTR4(KTR_BUSDMA, "bus_dmamap_load_uio: tag %p tag flags 0x%x "
	    "error %d nsegs %d", dmat, dmat->flags, error, nsegs + 1);
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	map->flags &= ~DMAMAP_TYPE_MASK;
	return;
}

static void
bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
{

	/*
	 * For POSTREAD (and the combined PREREAD|PREWRITE case), do a
	 * full write-back-and-invalidate so the CPU won't see stale
	 * cached data for a buffer the device has just written.
	 */
	if (op & BUS_DMASYNC_POSTREAD ||
	    op == (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) {
		cpu_dcache_wbinv_range((vm_offset_t)buf, len);
		return;
	}
	/* Push dirty cache lines to memory before the device reads them. */
	if (op & BUS_DMASYNC_PREWRITE)
		cpu_dcache_wb_range((vm_offset_t)buf, len);
	/*
	 * Before the device writes the buffer, a cache-line-aligned
	 * range can simply be invalidated; an unaligned one is written
	 * back first so unrelated data sharing the edge lines isn't lost.
	 */
	if (op & BUS_DMASYNC_PREREAD) {
		if ((((vm_offset_t)buf | len) & arm_dcache_align_mask) == 0)
			cpu_dcache_inv_range((vm_offset_t)buf, len);
		else
			cpu_dcache_wbinv_range((vm_offset_t)buf, len);
	}
}

void
bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct mbuf *m;
	struct uio *uio;
	int resid;
	struct iovec *iov;
	int i;

	if (op == BUS_DMASYNC_POSTWRITE)
		return;
	if (map->flags & DMAMAP_COHERENT)
		return;
	CTR2(KTR_BUSDMA, "bus_dmamap_sync: op %x flags %x", op, map->flags);
	switch (map->flags & DMAMAP_TYPE_MASK) {
	case DMAMAP_LINEAR:
		bus_dmamap_sync_buf(map->buffer, map->len, op);
		break;
	case DMAMAP_MBUF:
		m = map->buffer;
		while (m) {
			bus_dmamap_sync_buf(m->m_data, m->m_len, op);
			m = m->m_next;
		}
		break;
	case DMAMAP_UIO:
		uio = map->buffer;
		iov = uio->uio_iov;
		resid = uio->uio_resid;
		for (i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
			bus_size_t minlen = resid < iov[i].iov_len ? resid :
			    iov[i].iov_len;
			if (minlen > 0) {
				bus_dmamap_sync_buf(iov[i].iov_base, minlen,
				    op);
				resid -= minlen;
			}
		}
		break;
	default:
		break;
	}
	cpu_drain_writebuf();
}
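
/*
 * Example (illustrative sketch): the sync calls that typically bracket a
 * receive DMA transfer.  The names are hypothetical.
 */
#if 0
	/* Before handing the buffer to the device... */
	bus_dmamap_sync(sc->rx_dmat, rxd->map, BUS_DMASYNC_PREREAD);
	/* ... device DMA completes ... */
	/* ...and again before the CPU reads the device's data. */
	bus_dmamap_sync(sc->rx_dmat, rxd->map, BUS_DMASYNC_POSTREAD);
#endif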