/* busdma_machdep.c revision 187237 */
1/*-
2 * Copyright (c) 2006 Fill this file and put your name here
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions, and the following disclaimer,
10 *    without modification, immediately at the beginning of the file.
11 * 2. The name of the author may not be used to endorse or promote products
12 *    derived from this software without specific prior written permission.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
18 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 */
27
28/*-
29 * Copyright (c) 1997, 1998, 2001 The NetBSD Foundation, Inc.
30 * All rights reserved.
31 *
32 * This code is derived from software contributed to The NetBSD Foundation
33 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
34 * NASA Ames Research Center.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 *    notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 *    notice, this list of conditions and the following disclaimer in the
43 *    documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 *    must display the following acknowledgement:
46 *	This product includes software developed by the NetBSD
47 *	Foundation, Inc. and its contributors.
48 * 4. Neither the name of The NetBSD Foundation nor the names of its
49 *    contributors may be used to endorse or promote products derived
50 *    from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
53 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
54 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
55 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
56 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
57 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
58 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
59 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
60 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
61 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
62 * POSSIBILITY OF SUCH DAMAGE.
63 */
64
65/*	$NetBSD: bus_dma.c,v 1.17 2006/03/01 12:38:11 yamt Exp $	*/
66
67#include <sys/cdefs.h>
68__FBSDID("$FreeBSD: head/sys/mips/mips/busdma_machdep.c 187237 2009-01-14 20:27:49Z gonzo $");
69
70#include <sys/param.h>
71#include <sys/systm.h>
72#include <sys/malloc.h>
73#include <sys/bus.h>
74#include <sys/interrupt.h>
75#include <sys/lock.h>
76#include <sys/proc.h>
77#include <sys/mutex.h>
78#include <sys/mbuf.h>
79#include <sys/uio.h>
80#include <sys/ktr.h>
81#include <sys/kernel.h>
82
83#include <vm/vm.h>
84#include <vm/vm_page.h>
85#include <vm/vm_map.h>
86
87#include <machine/atomic.h>
88#include <machine/bus.h>
89#include <machine/cache.h>
90#include <machine/cpufunc.h>
91
/*
 * A DMA tag encapsulates the DMA constraints (alignment, boundary,
 * address window, segment limits) shared by all maps created from it.
 * Tags inherit restrictions from their parent at creation time.
 */
struct bus_dma_tag {
	bus_dma_tag_t		parent;		/* restrictions inherited from here */
	bus_size_t		alignment;	/* alignment of segments */
	bus_size_t		boundary;	/* boundary no segment may cross */
	bus_addr_t		lowaddr;	/* exclusion window low limit */
	bus_addr_t		highaddr;	/* exclusion window high limit */
	bus_dma_filter_t	*filter;	/* optional address filter */
	void			*filterarg;	/* argument for filter */
	bus_size_t		maxsize;	/* largest total mapping */
	u_int			nsegments;	/* max number of S/G segments */
	bus_size_t		maxsegsz;	/* largest single segment */
	int			flags;		/* BUS_DMA_* creation flags */
	int			ref_count;	/* self + child-tag references */
	int			map_count;	/* maps outstanding on this tag */
	bus_dma_lock_t		*lockfunc;	/* driver lock for deferred callbacks */
	void			*lockfuncarg;	/* argument for lockfunc */
	/* XXX: machine-dependent fields */
	vm_offset_t		_physbase;	/* physical base of the DMA window */
	vm_offset_t		_wbase;		/* bus-address base of the window */
	vm_offset_t		_wsize;		/* size of the DMA window */
};
113
/* Map type/state bits kept in bus_dmamap::flags. */
#define DMAMAP_LINEAR		0x1	/* loaded from a plain linear buffer */
#define DMAMAP_MBUF		0x2	/* loaded from an mbuf chain */
#define DMAMAP_UIO		0x4	/* loaded from a struct uio */
#define DMAMAP_ALLOCATED	0x10	/* malloc'ed, not from static map_pool */
#define DMAMAP_TYPE_MASK	(DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)
#define DMAMAP_COHERENT		0x8	/* set by the load routines; not
					 * currently examined by the sync code */
struct bus_dmamap {
        bus_dma_tag_t	dmat;		/* tag the map was created against */
	int		flags;		/* DMAMAP_* bits above */
	void 		*buffer;	/* currently loaded buffer/mbuf/uio */
	void		*origbuffer;	/* original cached address (coherent alloc) */
	void		*allocbuffer;	/* uncached KSEG1 alias handed to caller */
	TAILQ_ENTRY(bus_dmamap)	freelist;	/* linkage on dmamap_freelist */
	int		len;		/* total loaded length in bytes */
};
129
/*
 * Free list of pre-allocated maps, protected by busdma_mtx.  Maps are
 * handed out from here first; malloc(9) is the fallback when empty.
 */
static TAILQ_HEAD(,bus_dmamap) dmamap_freelist =
	TAILQ_HEAD_INITIALIZER(dmamap_freelist);

#define BUSDMA_STATIC_MAPS	500
static struct bus_dmamap map_pool[BUSDMA_STATIC_MAPS];

/* Guards dmamap_freelist; initialized via MTX_SYSINIT below. */
static struct mtx busdma_mtx;

MTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF);
139
140static void
141mips_dmamap_freelist_init(void *dummy)
142{
143	int i;
144
145	for (i = 0; i < BUSDMA_STATIC_MAPS; i++)
146		TAILQ_INSERT_HEAD(&dmamap_freelist, &map_pool[i], freelist);
147}
148
149SYSINIT(busdma, SI_SUB_VM, SI_ORDER_ANY, mips_dmamap_freelist_init, NULL);
150
/*
 * Forward declaration of the common segment-building helper shared by
 * all of the bus_dmamap_load*() variants below.  (Defined later in
 * this file.)
 */
static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp);
159
160/*
161 * Convenience function for manipulating driver locks from busdma (during
162 * busdma_swi, for example).  Drivers that don't provide their own locks
163 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
164 * non-mutex locking scheme don't have to use this at all.
165 */
166void
167busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
168{
169	struct mtx *dmtx;
170
171	dmtx = (struct mtx *)arg;
172	switch (op) {
173	case BUS_DMA_LOCK:
174		mtx_lock(dmtx);
175		break;
176	case BUS_DMA_UNLOCK:
177		mtx_unlock(dmtx);
178		break;
179	default:
180		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
181	}
182}
183
/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
#ifndef NO_DMA
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
	/* Reaching here means a driver deferred a callback without a lockfunc. */
#ifdef INVARIANTS
	panic("driver error: busdma dflt_lock called");
#else
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}
#endif
201
202static __inline bus_dmamap_t
203_busdma_alloc_dmamap(void)
204{
205	bus_dmamap_t map;
206
207	mtx_lock(&busdma_mtx);
208	map = TAILQ_FIRST(&dmamap_freelist);
209	if (map)
210		TAILQ_REMOVE(&dmamap_freelist, map, freelist);
211	mtx_unlock(&busdma_mtx);
212	if (!map) {
213		map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT | M_ZERO);
214		if (map)
215			map->flags = DMAMAP_ALLOCATED;
216	} else
217		map->flags = 0;
218	return (map);
219}
220
221static __inline void
222_busdma_free_dmamap(bus_dmamap_t map)
223{
224	if (map->flags & DMAMAP_ALLOCATED)
225		free(map, M_DEVBUF);
226	else {
227		mtx_lock(&busdma_mtx);
228		TAILQ_INSERT_HEAD(&dmamap_freelist, map, freelist);
229		mtx_unlock(&busdma_mtx);
230	}
231}
232
233int
234bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
235		   bus_size_t boundary, bus_addr_t lowaddr,
236		   bus_addr_t highaddr, bus_dma_filter_t *filter,
237		   void *filterarg, bus_size_t maxsize, int nsegments,
238		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
239		   void *lockfuncarg, bus_dma_tag_t *dmat)
240{
241#ifndef NO_DMA
242	bus_dma_tag_t newtag;
243	int error = 0;
244
245	/* Basic sanity checking */
246	if (boundary != 0 && boundary < maxsegsz)
247		maxsegsz = boundary;
248
249	/* Return a NULL tag on failure */
250	*dmat = NULL;
251
252	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
253	    M_ZERO | M_NOWAIT);
254	if (newtag == NULL) {
255		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
256		    __func__, newtag, 0, error);
257		return (ENOMEM);
258	}
259
260	newtag->parent = parent;
261	newtag->alignment = alignment;
262	newtag->boundary = boundary;
263	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
264	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
265	    (PAGE_SIZE - 1);
266	newtag->filter = filter;
267	newtag->filterarg = filterarg;
268	newtag->maxsize = maxsize;
269	newtag->nsegments = nsegments;
270	newtag->maxsegsz = maxsegsz;
271	newtag->flags = flags;
272	newtag->ref_count = 1; /* Count ourself */
273	newtag->map_count = 0;
274	newtag->_wbase = 0;
275	newtag->_physbase = 0;
276	/* XXXMIPS: Should we limit window size to amount of physical memory */
277	newtag->_wsize = MIPS_KSEG1_START - MIPS_KSEG0_START;
278	if (lockfunc != NULL) {
279		newtag->lockfunc = lockfunc;
280		newtag->lockfuncarg = lockfuncarg;
281	} else {
282		newtag->lockfunc = dflt_lock;
283		newtag->lockfuncarg = NULL;
284	}
285
286	/* Take into account any restrictions imposed by our parent tag */
287	if (parent != NULL) {
288		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
289		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
290		if (newtag->boundary == 0)
291			newtag->boundary = parent->boundary;
292		else if (parent->boundary != 0)
293			newtag->boundary = MIN(parent->boundary,
294					       newtag->boundary);
295		if (newtag->filter == NULL) {
296			/*
297			 * Short circuit looking at our parent directly
298			 * since we have encapsulated all of its information
299			 */
300			newtag->filter = parent->filter;
301			newtag->filterarg = parent->filterarg;
302			newtag->parent = parent->parent;
303		}
304		if (newtag->parent != NULL)
305			atomic_add_int(&parent->ref_count, 1);
306	}
307
308	if (error != 0) {
309		free(newtag, M_DEVBUF);
310	} else {
311		*dmat = newtag;
312	}
313	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
314	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
315	return (error);
316#else
317	return ENOSYS;
318#endif
319
320}
321
322int
323bus_dma_tag_destroy(bus_dma_tag_t dmat)
324{
325#ifdef KTR
326	bus_dma_tag_t dmat_copy = dmat;
327#endif
328
329	if (dmat != NULL) {
330
331                if (dmat->map_count != 0)
332                        return (EBUSY);
333
334                while (dmat != NULL) {
335                        bus_dma_tag_t parent;
336
337                        parent = dmat->parent;
338                        atomic_subtract_int(&dmat->ref_count, 1);
339                        if (dmat->ref_count == 0) {
340                                free(dmat, M_DEVBUF);
341                                /*
342                                 * Last reference count, so
343                                 * release our reference
344                                 * count on our parent.
345                                 */
346                                dmat = parent;
347                        } else
348                                dmat = NULL;
349                }
350        }
351	CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy);
352
353        return (0);
354}
355
356/*
357 * Allocate a handle for mapping from kva/uva/physical
358 * address space into bus device space.
359 */
360int
361bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
362{
363	bus_dmamap_t newmap;
364#ifdef KTR
365	int error = 0;
366#endif
367
368	newmap = _busdma_alloc_dmamap();
369	if (newmap == NULL) {
370		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
371		return (ENOMEM);
372	}
373	*mapp = newmap;
374	newmap->dmat = dmat;
375	dmat->map_count++;
376
377	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
378	    __func__, dmat, dmat->flags, error);
379
380	return (0);
381
382}
383
384/*
385 * Destroy a handle for mapping from kva/uva/physical
386 * address space into bus device space.
387 */
388int
389bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
390{
391	_busdma_free_dmamap(map);
392        dmat->map_count--;
393	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
394        return (0);
395}
396
/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
402int
403bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
404                 bus_dmamap_t *mapp)
405{
406	bus_dmamap_t newmap = NULL;
407
408	int mflags;
409
410	if (flags & BUS_DMA_NOWAIT)
411		mflags = M_NOWAIT;
412	else
413		mflags = M_WAITOK;
414	if (flags & BUS_DMA_ZERO)
415		mflags |= M_ZERO;
416
417	newmap = _busdma_alloc_dmamap();
418	if (newmap == NULL) {
419		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
420		    __func__, dmat, dmat->flags, ENOMEM);
421		return (ENOMEM);
422	}
423	dmat->map_count++;
424	*mapp = newmap;
425	newmap->dmat = dmat;
426
427        if (dmat->maxsize <= PAGE_SIZE) {
428                *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
429        } else {
430                /*
431                 * XXX Use Contigmalloc until it is merged into this facility
432                 *     and handles multi-seg allocations.  Nobody is doing
433                 *     multi-seg allocations yet though.
434                 */
435	         vm_paddr_t maxphys;
436	         if((uint32_t)dmat->lowaddr >= MIPS_KSEG0_LARGEST_PHYS) {
437		   /* Note in the else case I just put in what was already
438		    * being passed in dmat->lowaddr. I am not sure
439		    * how this would have worked. Since lowaddr is in the
440		    * max address postion. I would have thought that the
441		    * caller would have wanted dmat->highaddr. That is
442		    * presuming they are asking for physical addresses
443		    * which is what contigmalloc takes. - RRS
444		    */
445		   maxphys = MIPS_KSEG0_LARGEST_PHYS - 1;
446		 } else {
447		   maxphys = dmat->lowaddr;
448		 }
449                *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
450                    0ul, maxphys, dmat->alignment? dmat->alignment : 1ul,
451                    dmat->boundary);
452        }
453        if (*vaddr == NULL) {
454		if (newmap != NULL) {
455			_busdma_free_dmamap(newmap);
456			dmat->map_count--;
457		}
458		*mapp = NULL;
459                return (ENOMEM);
460	}
461	if (flags & BUS_DMA_COHERENT) {
462		void *tmpaddr = (void *)*vaddr;
463
464		if (tmpaddr) {
465			tmpaddr = (void *)MIPS_PHYS_TO_KSEG1(vtophys(tmpaddr));
466			newmap->origbuffer = *vaddr;
467			newmap->allocbuffer = tmpaddr;
468			mips_dcache_wbinv_range((vm_offset_t)*vaddr,
469			    dmat->maxsize);
470			*vaddr = tmpaddr;
471		} else
472			newmap->origbuffer = newmap->allocbuffer = NULL;
473	} else
474		newmap->origbuffer = newmap->allocbuffer = NULL;
475        return (0);
476
477}
478
/*
 * Free a piece of memory and its allocated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
483void
484bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
485{
486	if (map->allocbuffer) {
487		KASSERT(map->allocbuffer == vaddr,
488		    ("Trying to freeing the wrong DMA buffer"));
489		vaddr = map->origbuffer;
490	}
491        if (dmat->maxsize <= PAGE_SIZE)
492		free(vaddr, M_DEVBUF);
493        else {
494		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
495	}
496	dmat->map_count--;
497	_busdma_free_dmamap(map);
498	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
499
500}
501
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment index on entrance (-1 before any segment has
 * been emitted), and the ending segment index on exit.
 */
static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp)
{
	bus_size_t sgsize;
	bus_size_t bmask;
	vm_offset_t curaddr, lastaddr;
	vm_offset_t vaddr = (vm_offset_t)buf;
	int seg;
	int error = 0;

	lastaddr = *lastaddrp;
	/*
	 * Mask used to compare boundary-sized windows.  Meaningless when
	 * boundary == 0, but that case is guarded below before bmask is
	 * ever consulted.
	 */
	bmask = ~(dmat->boundary - 1);

	for (seg = *segp; buflen > 0 ; ) {
		/*
		 * Get the physical address for this segment.
		 */
		KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
		curaddr = pmap_kextract(vaddr);

		/*
		 * If we're beyond the current DMA window, indicate
		 * that and try to fall back onto something else.
		 */
		if (curaddr < dmat->_physbase ||
		    curaddr >= (dmat->_physbase + dmat->_wsize))
			return (EINVAL);

		/*
		 * In a valid DMA range.  Translate the physical
		 * memory address to an address in the DMA window.
		 */
		curaddr = (curaddr - dmat->_physbase) + dmat->_wbase;


		/*
		 * Compute the segment size, and adjust counts.
		 * Never cross a page in one chunk, since contiguity of
		 * virtual pages says nothing about physical contiguity.
		 */
		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.  Coalescing requires
		 * physical contiguity, staying within maxsegsz, and (when
		 * a boundary is set) staying within one boundary window.
		 */
		if (seg >= 0 && curaddr == lastaddr &&
		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
		    (dmat->boundary == 0 ||
		     (segs[seg].ds_addr & bmask) ==
		     (curaddr & bmask))) {
			segs[seg].ds_len += sgsize;
			goto segdone;
		} else {
			/* Out of segments: leave with buflen > 0 -> EFBIG. */
			if (++seg >= dmat->nsegments)
				break;
			segs[seg].ds_addr = curaddr;
			segs[seg].ds_len = sgsize;
		}
		/* NOTE(review): error is never set in this loop; dead check. */
		if (error)
			break;
segdone:
		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		error = EFBIG;

	return error;
}
588
/*
 * Map the buffer buf into bus space using the dmamap map.  The result
 * (or the error) is delivered through the callback; this function
 * itself always returns 0 — callers must inspect the callback's error
 * argument.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                bus_size_t buflen, bus_dmamap_callback_t *callback,
                void *callback_arg, int flags)
{
     	vm_offset_t	lastaddr = 0;
	int		error, nsegs = -1;	/* -1: no segment emitted yet */
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif

	KASSERT(dmat != NULL, ("dmatag is NULL"));
	KASSERT(map != NULL, ("dmamap is NULL"));
	/* Record what kind of buffer is loaded, for _bus_dmamap_sync(). */
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_LINEAR|DMAMAP_COHERENT;
	map->buffer = buf;
	map->len = buflen;
	error = bus_dmamap_load_buffer(dmat,
	    dm_segments, map, buf, buflen, kernel_pmap,
	    flags, &lastaddr, &nsegs);

	/* nsegs is the last segment index, hence nsegs + 1 segments total. */
	if (error)
		(*callback)(callback_arg, NULL, 0, error);
	else
		(*callback)(callback_arg, dm_segments, nsegs + 1, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, nsegs + 1, error);

	return (0);
}
626
/*
 * Like bus_dmamap_load(), but for mbufs.  Walks the chain, loading each
 * non-empty mbuf's data through bus_dmamap_load_buffer() so segments may
 * coalesce across mbufs.  Result delivered via the callback2; the error
 * is also returned.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs = -1, error = 0;	/* nsegs == -1: no segment yet */

	M_ASSERTPKTHDR(m0);

	/* Record buffer type for _bus_dmamap_sync(). */
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
	map->buffer = m0;
	map->len = 0;

	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		/* Stop at the first mbuf that fails to load. */
		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat,
				    dm_segments, map, m->m_data, m->m_len,
				    pmap_kernel(), flags, &lastaddr, &nsegs);
				map->len += m->m_len;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs + 1,
		    m0->m_pkthdr.len, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);

	return (error);
}
679
/*
 * Scatter/gather variant of bus_dmamap_load_mbuf(): fills the
 * caller-supplied segs[] array instead of invoking a callback, and
 * returns the segment count in *nsegs.  Always operates BUS_DMA_NOWAIT.
 */
int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			int flags)
{
	int error = 0;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	*nsegs = -1;	/* -1: no segment emitted yet */
	/* Record buffer type for _bus_dmamap_sync(). */
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
	map->buffer = m0;
	map->len = 0;

	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		/* Stop at the first mbuf that fails to load. */
		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat, segs, map,
				    m->m_data, m->m_len,
				    pmap_kernel(), flags, &lastaddr, nsegs);
				map->len += m->m_len;
			}
		}
	} else {
		error = EINVAL;
	}

	/* Convert last-index to a count (also turns -1 into 0 on failure). */
	++*nsegs;
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, *nsegs);

	return (error);
}
719
/*
 * Like bus_dmamap_load(), but for uios.
 * Not implemented on MIPS at this revision: always panics.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{

	panic("Unimplemented %s at %s:%d\n", __func__, __FILE__, __LINE__);
	return (0);
}
732
733/*
734 * Release the mapping held by map.
735 */
736void
737_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
738{
739
740	return;
741}
742
/*
 * Perform the cache maintenance required for one contiguous buffer.
 * Only the PRE operations reach here (POST ops are filtered out by
 * _bus_dmamap_sync); any other op value falls through and does nothing.
 */
static __inline void
bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
{

	switch (op) {
	case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
		/* Combined read+write: write back and invalidate in one op. */
		mips_dcache_wbinv_range((vm_offset_t)buf, len);
		break;

	case BUS_DMASYNC_PREREAD:
#if 1
		/*
		 * wbinv used instead of the plain invalidate in the #else
		 * branch — presumably to avoid losing dirty data in lines
		 * that straddle the buffer; TODO confirm rationale.
		 */
		mips_dcache_wbinv_range((vm_offset_t)buf, len);
#else
		mips_dcache_inv_range((vm_offset_t)buf, len);
#endif
		break;

	case BUS_DMASYNC_PREWRITE:
		/* Device will read the buffer: push dirty lines to memory. */
		mips_dcache_wb_range((vm_offset_t)buf, len);
		break;
	}
}
765
/*
 * Synchronize CPU caches with DMA for whatever buffer type (linear,
 * mbuf chain, or uio) is currently loaded in the map.  POST operations
 * are no-ops on this platform; see the long comment below.
 */
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct mbuf *m;
	struct uio *uio;
	int resid;
	struct iovec *iov;


	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((op & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (op & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync: mix PRE and POST");

	/*
	 * Since we're dealing with a virtually-indexed, write-back
	 * cache, we need to do the following things:
	 *
	 *	PREREAD -- Invalidate D-cache.  Note we might have
	 *	to also write-back here if we have to use an Index
	 *	op, or if the buffer start/end is not cache-line aligned.
	 *
	 *	PREWRITE -- Write-back the D-cache.  If we have to use
	 *	an Index op, we also have to invalidate.  Note that if
	 *	we are doing PREREAD|PREWRITE, we can collapse everything
	 *	into a single op.
	 *
	 *	POSTREAD -- Nothing.
	 *
	 *	POSTWRITE -- Nothing.
	 */

	/*
	 * Flush the write buffer.
	 * XXX Is this always necessary?
	 */
	mips_wbflush();

	/* Only the PRE operations require cache maintenance; see above. */
	op &= (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	if (op == 0)
		return;

	/* Dispatch on the buffer type recorded at load time. */
	CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
	switch(map->flags & DMAMAP_TYPE_MASK) {
	case DMAMAP_LINEAR:
		bus_dmamap_sync_buf(map->buffer, map->len, op);
		break;
	case DMAMAP_MBUF:
		/* Sync each non-empty mbuf in the chain individually. */
		m = map->buffer;
		while (m) {
			if (m->m_len > 0)
				bus_dmamap_sync_buf(m->m_data, m->m_len, op);
			m = m->m_next;
		}
		break;
	case DMAMAP_UIO:
		/* Sync each iovec, clamped to the uio's remaining residual. */
		uio = map->buffer;
		iov = uio->uio_iov;
		resid = uio->uio_resid;
		for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
			bus_size_t minlen = resid < iov[i].iov_len ? resid :
			    iov[i].iov_len;
			if (minlen > 0) {
				bus_dmamap_sync_buf(iov[i].iov_base, minlen, op);
				resid -= minlen;
			}
		}
		break;
	default:
		break;
	}
}
840