busdma_machdep.c revision 178172
1/*-
2 * Copyright (c) 2006 Fill this file and put your name here
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions, and the following disclaimer,
10 *    without modification, immediately at the beginning of the file.
11 * 2. The name of the author may not be used to endorse or promote products
12 *    derived from this software without specific prior written permission.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
18 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 */
27
28#define NO_DMA
29
30/*-
31 * Copyright (c) 1997, 1998, 2001 The NetBSD Foundation, Inc.
32 * All rights reserved.
33 *
34 * This code is derived from software contributed to The NetBSD Foundation
35 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
36 * NASA Ames Research Center.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 *    notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 *    notice, this list of conditions and the following disclaimer in the
45 *    documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 *    must display the following acknowledgement:
48 *	This product includes software developed by the NetBSD
49 *	Foundation, Inc. and its contributors.
50 * 4. Neither the name of The NetBSD Foundation nor the names of its
51 *    contributors may be used to endorse or promote products derived
52 *    from this software without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
55 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
56 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
57 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
58 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
59 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
60 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
61 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
62 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
63 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
64 * POSSIBILITY OF SUCH DAMAGE.
65 */
66
67/*	$NetBSD: bus_dma.c,v 1.17 2006/03/01 12:38:11 yamt Exp $	*/
68
69#include <sys/cdefs.h>
70__FBSDID("$FreeBSD: head/sys/mips/mips/busdma_machdep.c 178172 2008-04-13 07:27:37Z imp $");
71
72#include <sys/param.h>
73#include <sys/systm.h>
74#include <sys/malloc.h>
75#include <sys/bus.h>
76#include <sys/interrupt.h>
77#include <sys/lock.h>
78#include <sys/proc.h>
79#include <sys/mutex.h>
80#include <sys/mbuf.h>
81#include <sys/uio.h>
82#include <sys/ktr.h>
83#include <sys/kernel.h>
84
85#include <vm/vm.h>
86#include <vm/vm_page.h>
87#include <vm/vm_map.h>
88
89#include <machine/atomic.h>
90#include <machine/bus.h>
91#include <machine/cache.h>
92#include <machine/cpufunc.h>
93
/*
 * A DMA tag records the constraints (alignment, boundary, address
 * limits, segment count and size) that the bus_dmamap_load*() routines
 * below must honor for maps created from it.  Tags form a parent
 * chain; bus_dma_tag_create() folds a parent's restrictions into the
 * child.
 */
struct bus_dma_tag {
	bus_dma_tag_t		parent;		/* inherited restrictions */
	bus_size_t		alignment;	/* alignment for segments */
	bus_size_t		boundary;	/* segments must not cross this */
	bus_addr_t		lowaddr;	/* address limit (page-rounded) */
	bus_addr_t		highaddr;	/* address limit (page-rounded) */
	bus_dma_filter_t	*filter;	/* optional address filter */
	void			*filterarg;	/* argument for filter */
	bus_size_t		maxsize;	/* max total size of a load */
	u_int			nsegments;	/* max number of segments */
	bus_size_t		maxsegsz;	/* max size of one segment */
	int			flags;
	int			ref_count;	/* self + child tag refs */
	int			map_count;	/* outstanding maps */
	bus_dma_lock_t		*lockfunc;	/* lock for deferred callbacks */
	void			*lockfuncarg;
	/* XXX: machine-dependent fields */
	vm_offset_t		_physbase;	/* phys base of DMA window */
	vm_offset_t		_wbase;		/* bus address of _physbase */
	vm_offset_t		_wsize;		/* size of the DMA window */
};
115
/* map->flags: which kind of buffer the map currently describes. */
#define DMAMAP_LINEAR		0x1	/* plain linear KVA buffer */
#define DMAMAP_MBUF		0x2	/* mbuf chain */
#define DMAMAP_UIO		0x4	/* struct uio */
#define DMAMAP_ALLOCATED	0x10	/* malloc'ed, not from map_pool */
#define DMAMAP_TYPE_MASK	(DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)
#define DMAMAP_COHERENT		0x8	/* set by the load routines;
					   presumably marks coherent maps —
					   note the sync code below does not
					   test it */
struct bus_dmamap {
	bus_dma_tag_t	dmat;		/* tag the map was created against */
	int		flags;		/* DMAMAP_* above */
	void 		*buffer;	/* loaded buffer / mbuf / uio */
	void		*origbuffer;	/* bus_dmamem_alloc: KSEG0 address */
	void		*allocbuffer;	/* bus_dmamem_alloc: KSEG1 alias */
	TAILQ_ENTRY(bus_dmamap)	freelist; /* linkage on dmamap_freelist */
	int		len;		/* total length of loaded buffer */
};
131
/*
 * Free list of maps, seeded at boot from the static map_pool and
 * protected by busdma_mtx.  _busdma_alloc_dmamap() falls back to
 * malloc(9) when the pool is exhausted.
 */
static TAILQ_HEAD(,bus_dmamap) dmamap_freelist =
	TAILQ_HEAD_INITIALIZER(dmamap_freelist);

#define BUSDMA_STATIC_MAPS	500
static struct bus_dmamap map_pool[BUSDMA_STATIC_MAPS];

static struct mtx busdma_mtx;

MTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF);
141
142static void
143mips_dmamap_freelist_init(void *dummy)
144{
145	int i;
146
147	for (i = 0; i < BUSDMA_STATIC_MAPS; i++)
148		TAILQ_INSERT_HEAD(&dmamap_freelist, &map_pool[i], freelist);
149}
150
151SYSINIT(busdma, SI_SUB_VM, SI_ORDER_ANY, mips_dmamap_freelist_init, NULL);
152
/*
 * Forward declaration of the segment-building workhorse shared by all
 * of the bus_dmamap_load*() variants; defined below.
 */

static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp);
161
162/*
163 * Convenience function for manipulating driver locks from busdma (during
164 * busdma_swi, for example).  Drivers that don't provide their own locks
165 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
166 * non-mutex locking scheme don't have to use this at all.
167 */
168void
169busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
170{
171	struct mtx *dmtx;
172
173	dmtx = (struct mtx *)arg;
174	switch (op) {
175	case BUS_DMA_LOCK:
176		mtx_lock(dmtx);
177		break;
178	case BUS_DMA_UNLOCK:
179		mtx_unlock(dmtx);
180		break;
181	default:
182		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
183	}
184}
185
186/*
187 * dflt_lock should never get called.  It gets put into the dma tag when
188 * lockfunc == NULL, which is only valid if the maps that are associated
189 * with the tag are meant to never be defered.
190 * XXX Should have a way to identify which driver is responsible here.
191 */
#ifndef NO_DMA
/*
 * Placeholder lock installed by bus_dma_tag_create() when the caller
 * passes lockfunc == NULL.  That is only legal for maps whose
 * callbacks are never deferred, so any call here is a driver bug.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
	/* Debug kernels stop hard so the offender is easy to find. */
	panic("driver error: busdma dflt_lock called");
#else
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}
#endif
203
204static __inline bus_dmamap_t
205_busdma_alloc_dmamap(void)
206{
207	bus_dmamap_t map;
208
209	mtx_lock(&busdma_mtx);
210	map = TAILQ_FIRST(&dmamap_freelist);
211	if (map)
212		TAILQ_REMOVE(&dmamap_freelist, map, freelist);
213	mtx_unlock(&busdma_mtx);
214	if (!map) {
215		map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT | M_ZERO);
216		if (map)
217			map->flags = DMAMAP_ALLOCATED;
218	} else
219		map->flags = 0;
220	return (map);
221}
222
223static __inline void
224_busdma_free_dmamap(bus_dmamap_t map)
225{
226	if (map->flags & DMAMAP_ALLOCATED)
227		free(map, M_DEVBUF);
228	else {
229		mtx_lock(&busdma_mtx);
230		TAILQ_INSERT_HEAD(&dmamap_freelist, map, freelist);
231		mtx_unlock(&busdma_mtx);
232	}
233}
234
/*
 * Create a DMA tag describing the constraints in the arguments,
 * narrowed by any parent tag.  On success *dmat holds the new tag and
 * 0 is returned; on failure *dmat is NULL and an errno is returned.
 * With NO_DMA defined the whole body compiles out and ENOSYS results.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
#ifndef NO_DMA
	bus_dma_tag_t newtag;
	int error = 0;

	/* Basic sanity checking: a segment may never span a boundary. */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	/* Round each address limit up to the last byte of its page. */
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	newtag->_wbase = 0;
	newtag->_physbase = 0;
	/* XXXMIPS: Should we limit window size to amount of physical memory */
	newtag->_wsize = MIPS_KSEG1_START - MIPS_KSEG0_START;
	/*
	 * lockfunc == NULL is only valid for tags whose callbacks are
	 * never deferred; dflt_lock() panics/complains if ever invoked.
	 */
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		/* Use the tightest boundary of the two that are set. */
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		/* Hold a reference on whichever ancestor we now point at. */
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	/* error is always 0 here; the branch is kept for symmetry with
	 * other ports' busdma implementations. */
	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
	return (error);
#else
	return ENOSYS;
#endif

}
323
324int
325bus_dma_tag_destroy(bus_dma_tag_t dmat)
326{
327#ifdef KTR
328	bus_dma_tag_t dmat_copy = dmat;
329#endif
330
331	if (dmat != NULL) {
332
333                if (dmat->map_count != 0)
334                        return (EBUSY);
335
336                while (dmat != NULL) {
337                        bus_dma_tag_t parent;
338
339                        parent = dmat->parent;
340                        atomic_subtract_int(&dmat->ref_count, 1);
341                        if (dmat->ref_count == 0) {
342                                free(dmat, M_DEVBUF);
343                                /*
344                                 * Last reference count, so
345                                 * release our reference
346                                 * count on our parent.
347                                 */
348                                dmat = parent;
349                        } else
350                                dmat = NULL;
351                }
352        }
353	CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy);
354
355        return (0);
356}
357
358/*
359 * Allocate a handle for mapping from kva/uva/physical
360 * address space into bus device space.
361 */
362int
363bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
364{
365	bus_dmamap_t newmap;
366#ifdef KTR
367	int error = 0;
368#endif
369
370	newmap = _busdma_alloc_dmamap();
371	if (newmap == NULL) {
372		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
373		return (ENOMEM);
374	}
375	*mapp = newmap;
376	newmap->dmat = dmat;
377	dmat->map_count++;
378
379	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
380	    __func__, dmat, dmat->flags, error);
381
382	return (0);
383
384}
385
386/*
387 * Destroy a handle for mapping from kva/uva/physical
388 * address space into bus device space.
389 */
390int
391bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
392{
393	_busdma_free_dmamap(map);
394        dmat->map_count--;
395	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
396        return (0);
397}
398
/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
                 bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap = NULL;

	int mflags;

	/* Translate busdma wait/zero flags into malloc(9) flags. */
	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	newmap = _busdma_alloc_dmamap();
	if (newmap == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	}
	dmat->map_count++;
	*mapp = newmap;
	newmap->dmat = dmat;

        if (dmat->maxsize <= PAGE_SIZE) {
		/* Single page: plain malloc is physically contiguous. */
                *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
        } else {
                /*
                 * XXX Use Contigmalloc until it is merged into this facility
                 *     and handles multi-seg allocations.  Nobody is doing
                 *     multi-seg allocations yet though.
                 */
	         vm_paddr_t maxphys;
	         if((uint32_t)dmat->lowaddr >= MIPS_KSEG0_LARGEST_PHYS) {
		   /*
		    * Clamp to the largest physical address reachable
		    * through KSEG0.  In the else case the original
		    * author simply passed dmat->lowaddr through; as the
		    * comment from RRS noted, it is unclear whether the
		    * caller really meant lowaddr (the max-address slot
		    * of a physical-address limit) rather than highaddr
		    * — TODO confirm against contigmalloc's contract.
		    */
		   maxphys = MIPS_KSEG0_LARGEST_PHYS - 1;
		 } else {
		   maxphys = dmat->lowaddr;
		 }
                *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
                    0ul, maxphys, dmat->alignment? dmat->alignment : 1ul,
                    dmat->boundary);
        }
        if (*vaddr == NULL) {
		/* Allocation failed: give back the map and the count. */
		if (newmap != NULL) {
			_busdma_free_dmamap(newmap);
			dmat->map_count--;
		}
		*mapp = NULL;
                return (ENOMEM);
	}
	if (flags & BUS_DMA_COHERENT) {
		void *tmpaddr = (void *)*vaddr;

		if (tmpaddr) {
			/*
			 * Hand the caller an uncached KSEG1 alias of the
			 * buffer and remember both addresses so
			 * bus_dmamem_free() can free the original.
			 */
			tmpaddr = (void *)MIPS_PHYS_TO_KSEG1(vtophys(tmpaddr));
			newmap->origbuffer = *vaddr;
			newmap->allocbuffer = tmpaddr;
			mips_dcache_wbinv_range((vm_offset_t)*vaddr,
			    dmat->maxsize);
			*vaddr = tmpaddr;
		} else
			newmap->origbuffer = newmap->allocbuffer = NULL;
	} else
		newmap->origbuffer = newmap->allocbuffer = NULL;
        return (0);

}
480
/*
 * Free a piece of memory and its allocated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
485void
486bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
487{
488	if (map->allocbuffer) {
489		KASSERT(map->allocbuffer == vaddr,
490		    ("Trying to freeing the wrong DMA buffer"));
491		vaddr = map->origbuffer;
492	}
493        if (dmat->maxsize <= PAGE_SIZE)
494		free(vaddr, M_DEVBUF);
495        else {
496		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
497	}
498	dmat->map_count--;
499	_busdma_free_dmamap(map);
500	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
501
502}
503
504/*
505 * Utility function to load a linear buffer.  lastaddrp holds state
506 * between invocations (for multiple-buffer loads).  segp contains
507 * the starting segment on entrance, and the ending segment on exit.
508 * first indicates if this is the first invocation of this function.
509 */
static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp)
{
	bus_size_t sgsize;
	bus_size_t bmask;
	vm_offset_t curaddr, lastaddr;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	int error = 0;

	/* *lastaddrp/*segp carry state across multi-buffer loads. */
	lastaddr = *lastaddrp;
	/* Only meaningful when dmat->boundary != 0 (checked below). */
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
		curaddr = pmap_kextract(vaddr);

		/*
		 * If we're beyond the current DMA window, indicate
		 * that and try to fall back onto something else.
		 */
		if (curaddr < dmat->_physbase ||
		    curaddr >= (dmat->_physbase + dmat->_wsize))
			return (EINVAL);

		/*
		 * In a valid DMA range.  Translate the physical
		 * memory address to an address in the DMA window.
		 */
		curaddr = (curaddr - dmat->_physbase) + dmat->_wbase;


		/*
		 * Compute the segment size, and adjust counts.
		 * Never go past the end of the current page.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.  seg starts at -1
		 * on a fresh load, so the first chunk always opens a
		 * new segment.
		 */
		if (seg >= 0 && curaddr == lastaddr &&
		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
		    (dmat->boundary == 0 ||
		     (segs[seg].ds_addr & bmask) ==
		     (curaddr & bmask))) {
			segs[seg].ds_len += sgsize;
			goto segdone;
		} else {
			/* Out of segments: fall through to EFBIG below. */
			if (++seg >= dmat->nsegments)
				break;
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
		if (error)
			break;
segdone:
		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		error = EFBIG;

	return error;
}
590
591/*
592 * Map the buffer buf into bus space using the dmamap map.
593 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                bus_size_t buflen, bus_dmamap_callback_t *callback,
                void *callback_arg, int flags)
{
     	vm_offset_t	lastaddr = 0;
	/* nsegs starts at -1: load_buffer pre-increments for segment 0. */
	int		error, nsegs = -1;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif

	KASSERT(dmat != NULL, ("dmatag is NULL"));
	KASSERT(map != NULL, ("dmamap is NULL"));
	/* Remember what kind of buffer is loaded, for _bus_dmamap_sync. */
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_LINEAR|DMAMAP_COHERENT;
	map->buffer = buf;
	map->len = buflen;
	error = bus_dmamap_load_buffer(dmat,
	    dm_segments, map, buf, buflen, kernel_pmap,
	    flags, &lastaddr, &nsegs);

	/*
	 * Errors are reported through the callback, per the busdma
	 * contract; the function itself still returns 0.
	 */
	if (error)
		(*callback)(callback_arg, NULL, 0, error);
	else
		(*callback)(callback_arg, dm_segments, nsegs + 1, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, nsegs + 1, error);

	return (0);

}
628
629/*
630 * Like bus_dmamap_load(), but for mbufs.
631 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	/* nsegs starts at -1: load_buffer pre-increments for segment 0. */
	int nsegs = -1, error = 0;

	M_ASSERTPKTHDR(m0);

	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
	map->buffer = m0;
	map->len = 0;

	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		/*
		 * Feed each non-empty mbuf through the common segment
		 * builder; lastaddr/nsegs let adjacent mbufs coalesce.
		 */
		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat,
				    dm_segments, map, m->m_data, m->m_len,
				    pmap_kernel(), flags, &lastaddr, &nsegs);
				map->len += m->m_len;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs + 1,
		    m0->m_pkthdr.len, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);

	return (error);
}
681
/*
 * Like bus_dmamap_load_mbuf(), but fills a caller-supplied segment
 * array instead of invoking a callback; *nsegs returns the count.
 */
int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			int flags)
{
	int error = 0;

	M_ASSERTPKTHDR(m0);

	/* This variant never sleeps or defers. */
	flags |= BUS_DMA_NOWAIT;
	/* Starts at -1: load_buffer pre-increments for segment 0. */
	*nsegs = -1;
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
	map->buffer = m0;
	map->len = 0;

	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat, segs, map,
				    m->m_data, m->m_len,
				    pmap_kernel(), flags, &lastaddr, nsegs);
				map->len += m->m_len;
			}
		}
	} else {
		error = EINVAL;
	}

	/* Convert last segment index into a segment count. */
	++*nsegs;
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, *nsegs);

	return (error);

}
721
722/*
723 * Like bus_dmamap_load(), but for uios.
724 */
/* Not implemented on this platform; panics if ever called. */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{

	panic("Unimplemented %s at %s:%d\n", __func__, __FILE__, __LINE__);
	return (0);
}
734
735/*
736 * Release the mapping held by map.
737 */
738void
739_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
740{
741
742	return;
743}
744
/*
 * Perform the D-cache maintenance needed before DMA on one virtually
 * contiguous buffer.  Only PRE* ops reach here; _bus_dmamap_sync()
 * masks out the POST* bits first.
 */
static __inline void
bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
{

	switch (op) {
	case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
		mips_dcache_wbinv_range((vm_offset_t)buf, len);
		break;

	case BUS_DMASYNC_PREREAD:
#if 1
		/*
		 * Write-back-and-invalidate is used instead of a pure
		 * invalidate — presumably because the buffer ends may
		 * share cache lines with unrelated dirty data; TODO
		 * confirm before switching to the #else branch.
		 */
		mips_dcache_wbinv_range((vm_offset_t)buf, len);
#else
		mips_dcache_inv_range((vm_offset_t)buf, len);
#endif
		break;

	case BUS_DMASYNC_PREWRITE:
		mips_dcache_wb_range((vm_offset_t)buf, len);
		break;
	}
}
767
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct mbuf *m;
	struct uio *uio;
	int resid;
	struct iovec *iov;


	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((op & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (op & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync: mix PRE and POST");

	/*
	 * Since we're dealing with a virtually-indexed, write-back
	 * cache, we need to do the following things:
	 *
	 *	PREREAD -- Invalidate D-cache.  Note we might have
	 *	to also write-back here if we have to use an Index
	 *	op, or if the buffer start/end is not cache-line aligned.
	 *
	 *	PREWRITE -- Write-back the D-cache.  If we have to use
	 *	an Index op, we also have to invalidate.  Note that if
	 *	we are doing PREREAD|PREWRITE, we can collapse everything
	 *	into a single op.
	 *
	 *	POSTREAD -- Nothing.
	 *
	 *	POSTWRITE -- Nothing.
	 */

	/*
	 * Flush the write buffer.
	 * XXX Is this always necessary?
	 */
	mips_wbflush();

	/* Only the PRE* bits require cache work; done if none are set. */
	op &= (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	if (op == 0)
		return;

	CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
	/* Dispatch on the buffer type recorded by the load routine. */
	switch(map->flags & DMAMAP_TYPE_MASK) {
	case DMAMAP_LINEAR:
		bus_dmamap_sync_buf(map->buffer, map->len, op);
		break;
	case DMAMAP_MBUF:
		/* Sync every non-empty mbuf in the chain. */
		m = map->buffer;
		while (m) {
			if (m->m_len > 0)
				bus_dmamap_sync_buf(m->m_data, m->m_len, op);
			m = m->m_next;
		}
		break;
	case DMAMAP_UIO:
		/* Sync each iovec, bounded by the uio's residual count. */
		uio = map->buffer;
		iov = uio->uio_iov;
		resid = uio->uio_resid;
		for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
			bus_size_t minlen = resid < iov[i].iov_len ? resid :
			    iov[i].iov_len;
			if (minlen > 0) {
				bus_dmamap_sync_buf(iov[i].iov_base, minlen, op);
				resid -= minlen;
			}
		}
		break;
	default:
		break;
	}
}
842