busdma_machdep-v4.c revision 150860
/*-
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 2002 Peter Grehan
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *   From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/busdma_machdep.c 150860 2005-10-03 14:07:57Z cognet $");

/*
 * ARM bus dma support routines
 */

#define _ARM32_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/uio.h>
#include <sys/ktr.h>
#include <sys/kernel.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpufunc.h>

struct bus_dma_tag {
	bus_dma_tag_t		parent;
	bus_size_t		alignment;
	bus_size_t		boundary;
	bus_addr_t		lowaddr;
	bus_addr_t		highaddr;
	bus_dma_filter_t	*filter;
	void			*filterarg;
	bus_size_t		maxsize;
	u_int			nsegments;
	bus_size_t		maxsegsz;
	int			flags;
	int			ref_count;
	int			map_count;
	bus_dma_lock_t		*lockfunc;
	void			*lockfuncarg;
	/*
	 * DMA range for this tag.  If the page doesn't fall within
	 * one of these ranges, an error is returned.  The caller
	 * may then decide what to do with the transfer.  If the
	 * range pointer is NULL, it is ignored.
	 */
	struct arm32_dma_range	*ranges;
	int			_nranges;
};

#define DMAMAP_LINEAR		0x1
#define DMAMAP_MBUF		0x2
#define DMAMAP_UIO		0x4
#define DMAMAP_COHERENT		0x8
#define DMAMAP_ALLOCATED	0x10
#define DMAMAP_UNCACHED		0x20
#define DMAMAP_TYPE_MASK	(DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)
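
/*
 * Summary of the map flags above: LINEAR/MBUF/UIO record which kind of
 * buffer the map was last loaded with, ALLOCATED marks maps obtained
 * from malloc() rather than the static pool below, COHERENT means no
 * cache maintenance is needed when syncing, and UNCACHED notes that the
 * load path remapped the buffer's pages uncached.
 */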
struct bus_dmamap {
	bus_dma_tag_t	dmat;
	int		flags;
	void		*buffer;
	TAILQ_ENTRY(bus_dmamap)	freelist;
	int		len;
};

static TAILQ_HEAD(,bus_dmamap) dmamap_freelist =
	TAILQ_HEAD_INITIALIZER(dmamap_freelist);

#define BUSDMA_STATIC_MAPS	500
static struct bus_dmamap map_pool[BUSDMA_STATIC_MAPS];

static struct mtx busdma_mtx;

MTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF);

static void
arm_dmamap_freelist_init(void *dummy)
{
	int i;

	for (i = 0; i < BUSDMA_STATIC_MAPS; i++)
		TAILQ_INSERT_HEAD(&dmamap_freelist, &map_pool[i], freelist);
}

SYSINIT(busdma, SI_SUB_VM, SI_ORDER_ANY, arm_dmamap_freelist_init, NULL);
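
/*
 * The SYSINIT above populates the freelist with the statically allocated
 * maps at boot (SI_SUB_VM), so bus_dmamap_create() can normally hand out
 * a map without calling malloc(); malloc(M_NOWAIT) is only used as a
 * fallback once the pool is exhausted.
 */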

static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp);

/*
 * Check to see if the specified page is in an allowed DMA range.
 */
static __inline struct arm32_dma_range *
_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges,
    bus_addr_t curaddr)
{
	struct arm32_dma_range *dr;
	int i;

	for (i = 0, dr = ranges; i < nranges; i++, dr++) {
		if (curaddr >= dr->dr_sysbase &&
		    round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len))
			return (dr);
	}

	return (NULL);
}

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
	struct mtx *dmtx;

	dmtx = (struct mtx *)arg;
	switch (op) {
	case BUS_DMA_LOCK:
		mtx_lock(dmtx);
		break;
	case BUS_DMA_UNLOCK:
		mtx_unlock(dmtx);
		break;
	default:
		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
	}
}

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
	panic("driver error: busdma dflt_lock called");
#else
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}

static __inline bus_dmamap_t
_busdma_alloc_dmamap(void)
{
	bus_dmamap_t map;

	mtx_lock(&busdma_mtx);
	map = TAILQ_FIRST(&dmamap_freelist);
	if (map)
		TAILQ_REMOVE(&dmamap_freelist, map, freelist);
	mtx_unlock(&busdma_mtx);
	if (!map) {
		map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT);
		if (map)
			map->flags = DMAMAP_ALLOCATED;
	} else
		map->flags = 0;
	return (map);
}

static __inline void
_busdma_free_dmamap(bus_dmamap_t map)
{
	if (map->flags & DMAMAP_ALLOCATED)
		free(map, M_DEVBUF);
	else {
		mtx_lock(&busdma_mtx);
		TAILQ_INSERT_HEAD(&dmamap_freelist, map, freelist);
		mtx_unlock(&busdma_mtx);
	}
}

/*
 * Allocate a device specific dma_tag.
 */
#define SEG_NB 1024

int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;
	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	newtag->ranges = bus_dma_get_range();
	newtag->_nranges = bus_dma_get_range_nb();
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}
	/*
	 * Take into account any restrictions imposed by our parent tag
	 */
	if (parent != NULL) {
		newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = max(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = min(parent->boundary,
					       newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	*dmat = newtag;
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);

	return (error);
}
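
/*
 * Usage sketch (not part of this file; sc, sc_mtx, and sc_dmat are
 * hypothetical softc members): a driver creating a tag for a single
 * 4KB-or-smaller buffer, addressable anywhere below 4GB, serialized by
 * its own mutex through busdma_lock_mutex():
 *
 *	error = bus_dma_tag_create(NULL, 1, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    PAGE_SIZE, 1, PAGE_SIZE, 0, busdma_lock_mutex, &sc->sc_mtx,
 *	    &sc->sc_dmat);
 */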

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
#ifdef KTR
	bus_dma_tag_t dmat_copy = dmat;
#endif

	if (dmat != NULL) {

		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy);

	return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap;
#ifdef KTR
	int error = 0;
#endif

	newmap = _busdma_alloc_dmamap();
	if (newmap == NULL) {
		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
		return (ENOMEM);
	}
	*mapp = newmap;
	newmap->dmat = dmat;
	dmat->map_count++;

	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
	    __func__, dmat, dmat->flags, error);

	return (0);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{

	_busdma_free_dmamap(map);
	dmat->map_count--;
	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
	return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
                 bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap = NULL;
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	newmap = _busdma_alloc_dmamap();
	if (newmap == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	}
	dmat->map_count++;
	*mapp = newmap;
	newmap->dmat = dmat;

	if (dmat->maxsize <= PAGE_SIZE) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
	} else {
		/*
		 * XXX Use Contigmalloc until it is merged into this facility
		 *     and handles multi-seg allocations.  Nobody is doing
		 *     multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
		    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
		    dmat->boundary);
	}
	if (*vaddr == NULL) {
		if (newmap != NULL) {
			_busdma_free_dmamap(newmap);
			dmat->map_count--;
		}
		*mapp = NULL;
		return (ENOMEM);
	}
	return (0);
}

/*
 * Free a piece of memory and its allocated dmamap that were allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	if (dmat->maxsize <= PAGE_SIZE)
		free(vaddr, M_DEVBUF);
	else {
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
	}
	dmat->map_count--;
	_busdma_free_dmamap(map);
	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
}
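
/*
 * Usage sketch (not part of this file; sc_dmat, vaddr, and map are
 * hypothetical driver variables): allocate zeroed DMA-able memory and
 * its map, and later release both with the matching free routine:
 *
 *	void *vaddr;
 *	bus_dmamap_t map;
 *
 *	if (bus_dmamem_alloc(sc->sc_dmat, &vaddr,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &map) != 0)
 *		return (ENOMEM);
 *	...
 *	bus_dmamem_free(sc->sc_dmat, vaddr, map);
 */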

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment index on entrance and the ending segment
 * index on exit; it is -1 before the first segment is created.
 */
static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	int error = 0;
	pd_entry_t *pde;
	pt_entry_t pte;
	pt_entry_t *ptep;

	lastaddr = *lastaddrp;
	bmask = ~(dmat->boundary - 1);

	CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, "
	    "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 *
		 * XXX Don't support checking for coherent mappings
		 * XXX in user address space.
		 */
		if (__predict_true(pmap == pmap_kernel())) {
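			/*
			 * Kernel addresses are translated by walking the
			 * page tables directly: a 1MB section mapping is
			 * resolved from the L1 entry, otherwise the L2
			 * entry yields either a 64KB large page or a 4KB
			 * small page frame.  Cacheable section and large
			 * page mappings clear DMAMAP_COHERENT so the sync
			 * routines perform cache maintenance; small pages
			 * are instead remapped uncached via pmap_uncache()
			 * and flagged DMAMAP_UNCACHED.
			 */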
			(void) pmap_get_pde_pte(pmap, vaddr, &pde, &ptep);
			if (__predict_false(pmap_pde_section(pde))) {
				curaddr = (*pde & L1_S_FRAME) |
				    (vaddr & L1_S_OFFSET);
				if (*pde & L1_S_CACHE_MASK) {
					map->flags &=
					    ~DMAMAP_COHERENT;
				}
			} else {
				pte = *ptep;
				KASSERT((pte & L2_TYPE_MASK) != L2_TYPE_INV,
				    ("INV type"));
				if (__predict_false((pte & L2_TYPE_MASK)
						    == L2_TYPE_L)) {
					curaddr = (pte & L2_L_FRAME) |
					    (vaddr & L2_L_OFFSET);
					if (pte & L2_L_CACHE_MASK) {
						map->flags &=
						    ~DMAMAP_COHERENT;
					}
				} else {
					curaddr = (pte & L2_S_FRAME) |
					    (vaddr & L2_S_OFFSET);
					pmap_uncache(ptep);
					map->flags |= DMAMAP_UNCACHED;
#if 0
					if (pte & L2_S_CACHE_MASK) {
						map->flags &=
						    ~DMAMAP_COHERENT;
					}
#endif
				}
			}
		} else {
			curaddr = pmap_extract(pmap, vaddr);
			map->flags &= ~DMAMAP_COHERENT;
		}

		if (dmat->ranges) {
			struct arm32_dma_range *dr;

			dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges,
			    curaddr);
			if (dr == NULL)
				return (EINVAL);
			/*
			 * In a valid DMA range.  Translate the physical
			 * memory address to an address in the DMA window.
			 */
			curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase;
		}
		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (dmat->boundary > 0) {
			baddr = (curaddr + dmat->boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (seg >= 0 && curaddr == lastaddr &&
		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
		    (dmat->boundary == 0 ||
		     (segs[seg].ds_addr & bmask) ==
		     (curaddr & bmask))) {
			segs[seg].ds_len += sgsize;
			goto segdone;
		} else {
			if (++seg >= dmat->nsegments)
				break;
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
		if (error)
			break;
segdone:
		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		error = EFBIG; /* XXX better return value here? */
	return (error);
}

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                bus_size_t buflen, bus_dmamap_callback_t *callback,
                void *callback_arg, int flags)
{
	vm_offset_t	lastaddr = 0;
	int		error, nsegs = -1;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif

	KASSERT(dmat != NULL, ("dmatag is NULL"));
	KASSERT(map != NULL, ("dmamap is NULL"));
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_LINEAR|DMAMAP_COHERENT;
	map->buffer = buf;
	map->len = buflen;
	error = bus_dmamap_load_buffer(dmat,
	    dm_segments, map, buf, buflen, kernel_pmap,
	    flags, &lastaddr, &nsegs);
	if (error)
		(*callback)(callback_arg, NULL, 0, error);
	else
		(*callback)(callback_arg, dm_segments, nsegs + 1, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);

	return (0);
}
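
/*
 * Usage sketch (not part of this file; foo_dma_callback, sc, and
 * busaddr are hypothetical): load a wired kernel buffer and record the
 * bus address of its single segment from the callback:
 *
 *	static void
 *	foo_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg,
 *	    int error)
 *	{
 *		if (error == 0)
 *			*(bus_addr_t *)arg = segs[0].ds_addr;
 *	}
 *
 *	bus_dmamap_load(sc->sc_dmat, map, buf, buflen, foo_dma_callback,
 *	    &busaddr, BUS_DMA_NOWAIT);
 */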

/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs = -1, error = 0;

	M_ASSERTPKTHDR(m0);

	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
	map->buffer = m0;
	map->len = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat,
				    dm_segments, map, m->m_data, m->m_len,
				    pmap_kernel(), flags, &lastaddr, &nsegs);
				map->len += m->m_len;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs + 1,
		    m0->m_pkthdr.len, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);

	return (error);
}

int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			int flags)
{
	int error = 0;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	*nsegs = -1;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
	map->buffer = m0;
	map->len = 0;
	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat, segs, map,
						m->m_data, m->m_len,
						pmap_kernel(), flags, &lastaddr,
						nsegs);
				map->len += m->m_len;
			}
		}
	} else {
		error = EINVAL;
	}

	/* XXX FIXME: Having to increment nsegs is really annoying */
	++*nsegs;
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, *nsegs);
	return (error);
}
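
/*
 * Usage sketch (not part of this file; FOO_MAXSEGS, sc, and txd are
 * hypothetical): map an outgoing packet into a transmit descriptor
 * using the scatter/gather variant:
 *
 *	bus_dma_segment_t segs[FOO_MAXSEGS];
 *	int error, nsegs;
 *
 *	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, txd->tx_dmamap, m,
 *	    segs, &nsegs, BUS_DMA_NOWAIT);
 */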

/*
 * Like bus_dmamap_load(), but for uios.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{
	vm_offset_t lastaddr = 0;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs, i, error;
	bus_size_t resid;
	struct iovec *iov;
	struct pmap *pmap;

	resid = uio->uio_resid;
	iov = uio->uio_iov;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_UIO|DMAMAP_COHERENT;
	map->buffer = uio;
	map->len = 0;

	if (uio->uio_segflg == UIO_USERSPACE) {
		KASSERT(uio->uio_td != NULL,
		    ("bus_dmamap_load_uio: USERSPACE but no proc"));
		pmap = vmspace_pmap(uio->uio_td->td_proc->p_vmspace);
	} else
		pmap = kernel_pmap;

	error = 0;
	nsegs = -1;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		bus_size_t minlen =
		    resid < iov[i].iov_len ? resid : iov[i].iov_len;
		caddr_t addr = (caddr_t) iov[i].iov_base;

		if (minlen > 0) {
			error = bus_dmamap_load_buffer(dmat, dm_segments, map,
			    addr, minlen, pmap, flags, &lastaddr, &nsegs);

			map->len += minlen;
			resid -= minlen;
		}
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs+1,
		    uio->uio_resid, error);
	}

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);
	return (error);
}

/*
 * Release the mapping held by map.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
	struct mbuf *m;
	struct uio *uio;
	int resid;
	struct iovec *iov;

	if (map->flags & DMAMAP_UNCACHED) {
		switch(map->flags & DMAMAP_TYPE_MASK) {
		case DMAMAP_LINEAR:
			pmap_recache(map->buffer, map->len);
			break;
		case DMAMAP_MBUF:
			m = map->buffer;
			while (m) {
				if (m->m_len > 0)
					pmap_recache(m->m_data, m->m_len);
				m = m->m_next;
			}
			break;
		case DMAMAP_UIO:
			uio = map->buffer;
			iov = uio->uio_iov;
			resid = uio->uio_resid;
			for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
				bus_size_t minlen = resid < iov[i].iov_len ? resid :
				    iov[i].iov_len;
				if (minlen > 0) {
					pmap_recache(iov[i].iov_base, minlen);
					resid -= minlen;
				}
			}
			break;
		default:
			break;
		}
	}
	map->flags &= ~(DMAMAP_TYPE_MASK | DMAMAP_UNCACHED);
}

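/*
 * On a non-coherent ARM cache, a buffer must be written back from the
 * data cache before the device reads it (PREWRITE) and invalidated
 * before the CPU reads data the device has written (POSTREAD).  When
 * the buffer does not start and end on a cache line boundary, a
 * write-back is done together with the invalidate so that unrelated
 * dirty data sharing a line with the buffer is not discarded.
 */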
static __inline void
bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
{

	if (op & BUS_DMASYNC_PREWRITE)
		cpu_dcache_wb_range((vm_offset_t)buf, len);
	if (op & BUS_DMASYNC_POSTREAD) {
		if ((((vm_offset_t)buf | len) & arm_dcache_align_mask) == 0)
			cpu_dcache_inv_range((vm_offset_t)buf, len);
		else
			cpu_dcache_wbinv_range((vm_offset_t)buf, len);
	}
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct mbuf *m;
	struct uio *uio;
	int resid;
	struct iovec *iov;

	if (!(op & (BUS_DMASYNC_PREWRITE | BUS_DMASYNC_POSTREAD)))
		return;
	if (map->flags & DMAMAP_COHERENT) {
		printf("COHERENT\n");
		return;
	}
	if ((op & BUS_DMASYNC_POSTREAD) && (map->len >= 2 * PAGE_SIZE)) {
		cpu_dcache_wbinv_all();
		return;
	}
	CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
	switch(map->flags & DMAMAP_TYPE_MASK) {
	case DMAMAP_LINEAR:
		bus_dmamap_sync_buf(map->buffer, map->len, op);
		break;
	case DMAMAP_MBUF:
		m = map->buffer;
		while (m) {
			if (m->m_len > 0)
				bus_dmamap_sync_buf(m->m_data, m->m_len, op);
			m = m->m_next;
		}
		break;
	case DMAMAP_UIO:
		uio = map->buffer;
		iov = uio->uio_iov;
		resid = uio->uio_resid;
		for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
			bus_size_t minlen = resid < iov[i].iov_len ? resid :
			    iov[i].iov_len;
			if (minlen > 0) {
				bus_dmamap_sync_buf(iov[i].iov_base, minlen,
				    op);
				resid -= minlen;
			}
		}
		break;
	default:
		break;
	}
	cpu_drain_writebuf();
}
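
/*
 * Usage sketch (not part of this file; sc and map are hypothetical):
 * a driver brackets device DMA with the appropriate sync operations,
 * typically through the bus_dmamap_sync() wrapper:
 *
 *	bus_dmamap_sync(sc->sc_dmat, map, BUS_DMASYNC_PREWRITE);
 *	... start the transfer and wait for the device to finish ...
 *	bus_dmamap_sync(sc->sc_dmat, map, BUS_DMASYNC_POSTREAD);
 */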
883