/* busdma_machdep.c revision 195162 */
1/*-
2 * Copyright (c) 2006 Oleksandr Tymoshenko
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions, and the following disclaimer,
10 *    without modification, immediately at the beginning of the file.
11 * 2. The name of the author may not be used to endorse or promote products
12 *    derived from this software without specific prior written permission.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
18 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 */
27
28/*-
29 * Copyright (c) 1997, 1998, 2001 The NetBSD Foundation, Inc.
30 * All rights reserved.
31 *
32 * This code is derived from software contributed to The NetBSD Foundation
33 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
34 * NASA Ames Research Center.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 *    notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 *    notice, this list of conditions and the following disclaimer in the
43 *    documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 *    must display the following acknowledgement:
46 *	This product includes software developed by the NetBSD
47 *	Foundation, Inc. and its contributors.
48 * 4. Neither the name of The NetBSD Foundation nor the names of its
49 *    contributors may be used to endorse or promote products derived
50 *    from this software without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
53 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
54 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
55 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
56 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
57 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
58 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
59 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
60 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
61 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
62 * POSSIBILITY OF SUCH DAMAGE.
63 */
64
65/*	$NetBSD: bus_dma.c,v 1.17 2006/03/01 12:38:11 yamt Exp $	*/
66
67#include <sys/cdefs.h>
68__FBSDID("$FreeBSD: head/sys/mips/mips/busdma_machdep.c 195162 2009-06-29 16:45:50Z imp $");
69
70#include <sys/param.h>
71#include <sys/systm.h>
72#include <sys/malloc.h>
73#include <sys/bus.h>
74#include <sys/interrupt.h>
75#include <sys/lock.h>
76#include <sys/proc.h>
77#include <sys/mutex.h>
78#include <sys/mbuf.h>
79#include <sys/uio.h>
80#include <sys/ktr.h>
81#include <sys/kernel.h>
82
83#include <vm/vm.h>
84#include <vm/vm_page.h>
85#include <vm/vm_map.h>
86
87#include <machine/atomic.h>
88#include <machine/bus.h>
89#include <machine/cache.h>
90#include <machine/cpufunc.h>
91
/*
 * A DMA tag records the device's DMA constraints (addressable range,
 * alignment, boundary, segment limits) plus the lock callback used for
 * deferred load callbacks.  Tags inherit restrictions from their parent.
 */
struct bus_dma_tag {
	bus_dma_tag_t		parent;		/* tag we inherit limits from */
	bus_size_t		alignment;	/* alignment of allocations */
	bus_size_t		boundary;	/* segments never cross this */
	bus_addr_t		lowaddr;	/* exclusion window low bound */
	bus_addr_t		highaddr;	/* exclusion window high bound */
	bus_dma_filter_t	*filter;	/* optional address filter */
	void			*filterarg;	/* argument for filter */
	bus_size_t		maxsize;	/* max total mapping size */
	u_int			nsegments;	/* max number of S/G segments */
	bus_size_t		maxsegsz;	/* max size of one segment */
	int			flags;		/* BUS_DMA_* creation flags */
	int			ref_count;	/* self + child-tag references */
	u_int			map_count;	/* maps created from this tag */
	bus_dma_lock_t		*lockfunc;	/* callback-serialization lock */
	void			*lockfuncarg;	/* argument for lockfunc */
	/* XXX: machine-dependent fields */
	vm_offset_t		_physbase;	/* physical base of DMA window */
	vm_offset_t		_wbase;		/* bus address of window base */
	vm_offset_t		_wsize;		/* size of the DMA window */
};
113
/*
 * Per-map flags.  The LINEAR/MBUF/UIO bits record what kind of buffer is
 * currently loaded so _bus_dmamap_sync() knows how to walk it.
 */
#define DMAMAP_LINEAR		0x1	/* plain contiguous KVA buffer */
#define DMAMAP_MBUF		0x2	/* mbuf chain */
#define DMAMAP_UIO		0x4	/* struct uio */
#define DMAMAP_ALLOCATED	0x10	/* map malloc'ed, not from the pool */
#define DMAMAP_TYPE_MASK	(DMAMAP_LINEAR|DMAMAP_MBUF|DMAMAP_UIO)
#define DMAMAP_COHERENT		0x8	/* set on load; not examined here */
struct bus_dmamap {
        bus_dma_tag_t	dmat;		/* tag the map was created with */
	int		flags;		/* DMAMAP_* flags above */
	void 		*buffer;	/* currently loaded buffer */
	void		*origbuffer;	/* cached KVA from bus_dmamem_alloc */
	void		*allocbuffer;	/* KSEG1 alias handed to the caller */
	TAILQ_ENTRY(bus_dmamap)	freelist; /* linkage on dmamap_freelist */
	int		len;		/* total length of loaded buffer */
};
129
/* Free list of maps, seeded from the static pool below at boot time. */
static TAILQ_HEAD(,bus_dmamap) dmamap_freelist =
	TAILQ_HEAD_INITIALIZER(dmamap_freelist);

/* Number of statically allocated maps that seed the free list. */
#define BUSDMA_STATIC_MAPS	500
static struct bus_dmamap map_pool[BUSDMA_STATIC_MAPS];

/* Protects dmamap_freelist. */
static struct mtx busdma_mtx;

MTX_SYSINIT(busdma_mtx, &busdma_mtx, "busdma lock", MTX_DEF);
139
/* Put every map of the static pool onto the free list, once, at boot. */
static void
mips_dmamap_freelist_init(void *dummy)
{
	int i;

	for (i = 0; i < BUSDMA_STATIC_MAPS; i++)
		TAILQ_INSERT_HEAD(&dmamap_freelist, &map_pool[i], freelist);
}

SYSINIT(busdma, SI_SUB_VM, SI_ORDER_ANY, mips_dmamap_freelist_init, NULL);
150
/*
 * Helper that builds the segment list for one contiguous buffer and
 * verifies that every page falls inside the tag's DMA window.
 */
154
/* Forward declaration; the definition below documents the contract. */
static __inline int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
    int flags, vm_offset_t *lastaddrp, int *segp);
159
160/*
161 * Convenience function for manipulating driver locks from busdma (during
162 * busdma_swi, for example).  Drivers that don't provide their own locks
163 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
164 * non-mutex locking scheme don't have to use this at all.
165 */
166void
167busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
168{
169	struct mtx *dmtx;
170
171	dmtx = (struct mtx *)arg;
172	switch (op) {
173	case BUS_DMA_LOCK:
174		mtx_lock(dmtx);
175		break;
176	case BUS_DMA_UNLOCK:
177		mtx_unlock(dmtx);
178		break;
179	default:
180		panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
181	}
182}
183
/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
#ifdef INVARIANTS
	panic("driver error: busdma dflt_lock called");
#else
	printf("DRIVER_ERROR: busdma dflt_lock called\n");
#endif
}
199
200static __inline bus_dmamap_t
201_busdma_alloc_dmamap(void)
202{
203	bus_dmamap_t map;
204
205	mtx_lock(&busdma_mtx);
206	map = TAILQ_FIRST(&dmamap_freelist);
207	if (map)
208		TAILQ_REMOVE(&dmamap_freelist, map, freelist);
209	mtx_unlock(&busdma_mtx);
210	if (!map) {
211		map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT | M_ZERO);
212		if (map)
213			map->flags = DMAMAP_ALLOCATED;
214	} else
215		map->flags = 0;
216	return (map);
217}
218
219static __inline void
220_busdma_free_dmamap(bus_dmamap_t map)
221{
222	if (map->flags & DMAMAP_ALLOCATED)
223		free(map, M_DEVBUF);
224	else {
225		mtx_lock(&busdma_mtx);
226		TAILQ_INSERT_HEAD(&dmamap_freelist, map, freelist);
227		mtx_unlock(&busdma_mtx);
228	}
229}
230
/*
 * Create a DMA tag from the given constraints, folding in any
 * restrictions imposed by the parent tag so that the child is never
 * more permissive.  Returns 0 with the new tag in *dmat, or ENOMEM
 * (with *dmat left NULL) on allocation failure.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
		   bus_size_t boundary, bus_addr_t lowaddr,
		   bus_addr_t highaddr, bus_dma_filter_t *filter,
		   void *filterarg, bus_size_t maxsize, int nsegments,
		   bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
		   void *lockfuncarg, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t newtag;
	int error = 0;

	/* Basic sanity checking: a segment may never span a boundary. */
	if (boundary != 0 && boundary < maxsegsz)
		maxsegsz = boundary;

	/* Return a NULL tag on failure */
	*dmat = NULL;

	newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF,
	    M_ZERO | M_NOWAIT);
	if (newtag == NULL) {
		CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
		    __func__, newtag, 0, error);
		return (ENOMEM);
	}

	newtag->parent = parent;
	newtag->alignment = alignment;
	newtag->boundary = boundary;
	/* Round the address limits out to the end of their page. */
	newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1);
	newtag->highaddr = trunc_page((vm_paddr_t)highaddr) +
	    (PAGE_SIZE - 1);
	newtag->filter = filter;
	newtag->filterarg = filterarg;
	newtag->maxsize = maxsize;
	newtag->nsegments = nsegments;
	newtag->maxsegsz = maxsegsz;
	newtag->flags = flags;
	newtag->ref_count = 1; /* Count ourself */
	newtag->map_count = 0;
	newtag->_wbase = 0;
	newtag->_physbase = 0;
	/* XXXMIPS: Should we limit window size to amount of physical memory */
	newtag->_wsize = MIPS_KSEG1_START - MIPS_KSEG0_START;
	if (lockfunc != NULL) {
		newtag->lockfunc = lockfunc;
		newtag->lockfuncarg = lockfuncarg;
	} else {
		/* No lock callback: deferred callbacks are a driver error. */
		newtag->lockfunc = dflt_lock;
		newtag->lockfuncarg = NULL;
	}

	/* Take into account any restrictions imposed by our parent tag */
	if (parent != NULL) {
		newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr);
		newtag->highaddr = MAX(parent->highaddr, newtag->highaddr);
		if (newtag->boundary == 0)
			newtag->boundary = parent->boundary;
		else if (parent->boundary != 0)
			newtag->boundary = MIN(parent->boundary,
					       newtag->boundary);
		if (newtag->filter == NULL) {
			/*
			 * Short circuit looking at our parent directly
			 * since we have encapsulated all of its information
			 */
			newtag->filter = parent->filter;
			newtag->filterarg = parent->filterarg;
			newtag->parent = parent->parent;
		}
		if (newtag->parent != NULL)
			atomic_add_int(&parent->ref_count, 1);
	}

	/*
	 * NOTE(review): error is never set after the malloc check above,
	 * so the free() branch here is currently unreachable.
	 */
	if (error != 0) {
		free(newtag, M_DEVBUF);
	} else {
		*dmat = newtag;
	}
	CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d",
	    __func__, newtag, (newtag != NULL ? newtag->flags : 0), error);
	return (error);
}
314
/*
 * Drop a reference on a tag.  When the last reference goes away the tag
 * is freed and the reference it held on its parent is dropped in turn,
 * walking up the parent chain.  Fails with EBUSY while maps created
 * from the tag still exist.  Destroying a NULL tag is a no-op.
 */
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
#ifdef KTR
	bus_dma_tag_t dmat_copy = dmat;	/* dmat is NULL'ed below; keep a copy for tracing */
#endif

	if (dmat != NULL) {

                if (dmat->map_count != 0)
                        return (EBUSY);

                while (dmat != NULL) {
                        bus_dma_tag_t parent;

                        parent = dmat->parent;
                        atomic_subtract_int(&dmat->ref_count, 1);
                        if (dmat->ref_count == 0) {
                                free(dmat, M_DEVBUF);
                                /*
                                 * Last reference count, so
                                 * release our reference
                                 * count on our parent.
                                 */
                                dmat = parent;
                        } else
                                dmat = NULL;
                }
        }
	CTR2(KTR_BUSDMA, "%s tag %p", __func__, dmat_copy);

        return (0);
}
348
349/*
350 * Allocate a handle for mapping from kva/uva/physical
351 * address space into bus device space.
352 */
353int
354bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
355{
356	bus_dmamap_t newmap;
357#ifdef KTR
358	int error = 0;
359#endif
360
361	newmap = _busdma_alloc_dmamap();
362	if (newmap == NULL) {
363		CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM);
364		return (ENOMEM);
365	}
366	*mapp = newmap;
367	newmap->dmat = dmat;
368	dmat->map_count++;
369
370	CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
371	    __func__, dmat, dmat->flags, error);
372
373	return (0);
374
375}
376
377/*
378 * Destroy a handle for mapping from kva/uva/physical
379 * address space into bus device space.
380 */
381int
382bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
383{
384	_busdma_free_dmamap(map);
385        dmat->map_count--;
386	CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
387        return (0);
388}
389
/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
                 bus_dmamap_t *mapp)
{
	bus_dmamap_t newmap = NULL;

	int mflags;

	/* Translate BUS_DMA_* flags into malloc(9) flags. */
	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	newmap = _busdma_alloc_dmamap();
	if (newmap == NULL) {
		CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
		    __func__, dmat, dmat->flags, ENOMEM);
		return (ENOMEM);
	}
	dmat->map_count++;
	*mapp = newmap;
	newmap->dmat = dmat;

	/* Allocations of at most a page can use plain malloc(9). */
        if (dmat->maxsize <= PAGE_SIZE) {
                *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
        } else {
                /*
                 * XXX Use Contigmalloc until it is merged into this facility
                 *     and handles multi-seg allocations.  Nobody is doing
                 *     multi-seg allocations yet though.
                 */
	         vm_paddr_t maxphys;
	         if((uint32_t)dmat->lowaddr >= MIPS_KSEG0_LARGEST_PHYS) {
		   /* Note in the else case I just put in what was already
		    * being passed in dmat->lowaddr. I am not sure
		    * how this would have worked. Since lowaddr is in the
		    * max address position. I would have thought that the
		    * caller would have wanted dmat->highaddr. That is
		    * presuming they are asking for physical addresses
		    * which is what contigmalloc takes. - RRS
		    */
		   maxphys = MIPS_KSEG0_LARGEST_PHYS - 1;
		 } else {
		   maxphys = dmat->lowaddr;
		 }
                *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
                    0ul, maxphys, dmat->alignment? dmat->alignment : 1ul,
                    dmat->boundary);
        }
        if (*vaddr == NULL) {
		if (newmap != NULL) {
			_busdma_free_dmamap(newmap);
			dmat->map_count--;
		}
		*mapp = NULL;
                return (ENOMEM);
	}
	/*
	 * For BUS_DMA_COHERENT, hand the caller an uncached KSEG1 alias of
	 * the buffer and remember both addresses so bus_dmamem_free() can
	 * translate back to the original before freeing.
	 */
	if (flags & BUS_DMA_COHERENT) {
		void *tmpaddr = (void *)*vaddr;

		if (tmpaddr) {
			tmpaddr = (void *)MIPS_PHYS_TO_KSEG1(vtophys(tmpaddr));
			newmap->origbuffer = *vaddr;
			newmap->allocbuffer = tmpaddr;
			/* Push dirty cache lines out before the alias is used. */
			mips_dcache_wbinv_range((vm_offset_t)*vaddr,
			    dmat->maxsize);
			*vaddr = tmpaddr;
		} else
			newmap->origbuffer = newmap->allocbuffer = NULL;
	} else
		newmap->origbuffer = newmap->allocbuffer = NULL;
        return (0);

}
471
472/*
473 * Free a piece of memory and it's allocated dmamap, that was allocated
474 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
475 */
476void
477bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
478{
479	if (map->allocbuffer) {
480		KASSERT(map->allocbuffer == vaddr,
481		    ("Trying to freeing the wrong DMA buffer"));
482		vaddr = map->origbuffer;
483	}
484        if (dmat->maxsize <= PAGE_SIZE)
485		free(vaddr, M_DEVBUF);
486        else {
487		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
488	}
489	dmat->map_count--;
490	_busdma_free_dmamap(map);
491	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
492
493}
494
495/*
496 * Utility function to load a linear buffer.  lastaddrp holds state
497 * between invocations (for multiple-buffer loads).  segp contains
498 * the starting segment on entrance, and the ending segment on exit.
499 * first indicates if this is the first invocation of this function.
500 */
501static __inline int
502bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
503    bus_dmamap_t map, void *buf, bus_size_t buflen, struct pmap *pmap,
504    int flags, vm_offset_t *lastaddrp, int *segp)
505{
506	bus_size_t sgsize;
507	bus_size_t bmask;
508	vm_offset_t curaddr, lastaddr;
509	vm_offset_t vaddr = (vm_offset_t)buf;
510	int seg;
511	int error = 0;
512
513	lastaddr = *lastaddrp;
514	bmask = ~(dmat->boundary - 1);
515
516	for (seg = *segp; buflen > 0 ; ) {
517		/*
518		 * Get the physical address for this segment.
519		 */
520		KASSERT(kernel_pmap == pmap, ("pmap is not kernel pmap"));
521		curaddr = pmap_kextract(vaddr);
522
523		/*
524		 * If we're beyond the current DMA window, indicate
525		 * that and try to fall back onto something else.
526		 */
527		if (curaddr < dmat->_physbase ||
528		    curaddr >= (dmat->_physbase + dmat->_wsize))
529			return (EINVAL);
530
531		/*
532		 * In a valid DMA range.  Translate the physical
533		 * memory address to an address in the DMA window.
534		 */
535		curaddr = (curaddr - dmat->_physbase) + dmat->_wbase;
536
537
538		/*
539		 * Compute the segment size, and adjust counts.
540		 */
541		sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
542		if (buflen < sgsize)
543			sgsize = buflen;
544
545		/*
546		 * Insert chunk into a segment, coalescing with
547		 * the previous segment if possible.
548		 */
549		if (seg >= 0 && curaddr == lastaddr &&
550		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
551		    (dmat->boundary == 0 ||
552		     (segs[seg].ds_addr & bmask) ==
553		     (curaddr & bmask))) {
554			segs[seg].ds_len += sgsize;
555			goto segdone;
556		} else {
557			if (++seg >= dmat->nsegments)
558				break;
559			segs[seg].ds_addr = curaddr;
560			segs[seg].ds_len = sgsize;
561		}
562		if (error)
563			break;
564segdone:
565		lastaddr = curaddr + sgsize;
566		vaddr += sgsize;
567		buflen -= sgsize;
568	}
569
570	*segp = seg;
571	*lastaddrp = lastaddr;
572
573	/*
574	 * Did we fit?
575	 */
576	if (buflen != 0)
577		error = EFBIG;
578
579	return error;
580}
581
/*
 * Map the buffer buf into bus space using the dmamap map.  The segment
 * list is built on the stack and handed to the driver's callback; on a
 * load error the callback receives a NULL list and the error code.
 * NOTE(review): this implementation always returns 0, even on load
 * failure -- callers learn of errors only through the callback.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
                bus_size_t buflen, bus_dmamap_callback_t *callback,
                void *callback_arg, int flags)
{
     	vm_offset_t	lastaddr = 0;
	int		error, nsegs = -1;
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif

	KASSERT(dmat != NULL, ("dmatag is NULL"));
	KASSERT(map != NULL, ("dmamap is NULL"));
	/* Record the buffer type for _bus_dmamap_sync(). */
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_LINEAR|DMAMAP_COHERENT;
	map->buffer = buf;
	map->len = buflen;
	error = bus_dmamap_load_buffer(dmat,
	    dm_segments, map, buf, buflen, kernel_pmap,
	    flags, &lastaddr, &nsegs);

	/* nsegs is the last segment's index, so the count is nsegs + 1. */
	if (error)
		(*callback)(callback_arg, NULL, 0, error);
	else
		(*callback)(callback_arg, dm_segments, nsegs + 1, error);

	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, nsegs + 1, error);

	return (0);

}
619
/*
 * Like bus_dmamap_load(), but for mbufs.  Walks the packet's mbuf chain
 * and loads each non-empty mbuf's data; the callback2 receives the
 * segment list, the total packet length, and the error (with a zero
 * segment count on failure).  Rejects packets longer than the tag's
 * maxsize with EINVAL.
 */
int
bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
		     bus_dmamap_callback2_t *callback, void *callback_arg,
		     int flags)
{
#ifdef __CC_SUPPORTS_DYNAMIC_ARRAY_INIT
	bus_dma_segment_t dm_segments[dmat->nsegments];
#else
	bus_dma_segment_t dm_segments[BUS_DMAMAP_NSEGS];
#endif
	int nsegs = -1, error = 0;

	M_ASSERTPKTHDR(m0);

	/* Record the buffer type for _bus_dmamap_sync(). */
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
	map->buffer = m0;
	map->len = 0;

	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		/* Accumulate segments across the chain; stop on first error. */
		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat,
				    dm_segments, map, m->m_data, m->m_len,
				    kernel_pmap, flags, &lastaddr, &nsegs);
				map->len += m->m_len;
			}
		}
	} else {
		error = EINVAL;
	}

	if (error) {
		/*
		 * force "no valid mappings" on error in callback.
		 */
		(*callback)(callback_arg, dm_segments, 0, 0, error);
	} else {
		(*callback)(callback_arg, dm_segments, nsegs + 1,
		    m0->m_pkthdr.len, error);
	}
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, nsegs + 1);

	return (error);
}
672
/*
 * Like bus_dmamap_load_mbuf(), but fills a caller-supplied segment
 * array instead of invoking a callback.  *nsegs receives the segment
 * count.  Always non-blocking (BUS_DMA_NOWAIT is forced).
 */
int
bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
			struct mbuf *m0, bus_dma_segment_t *segs, int *nsegs,
			int flags)
{
	int error = 0;

	M_ASSERTPKTHDR(m0);

	flags |= BUS_DMA_NOWAIT;
	*nsegs = -1;	/* index of the last segment; bumped to a count below */
	/* Record the buffer type for _bus_dmamap_sync(). */
	map->flags &= ~DMAMAP_TYPE_MASK;
	map->flags |= DMAMAP_MBUF | DMAMAP_COHERENT;
	map->buffer = m0;
	map->len = 0;

	if (m0->m_pkthdr.len <= dmat->maxsize) {
		vm_offset_t lastaddr = 0;
		struct mbuf *m;

		/* Accumulate segments across the chain; stop on first error. */
		for (m = m0; m != NULL && error == 0; m = m->m_next) {
			if (m->m_len > 0) {
				error = bus_dmamap_load_buffer(dmat, segs, map,
				    m->m_data, m->m_len,
				    kernel_pmap, flags, &lastaddr, nsegs);
				map->len += m->m_len;
			}
		}
	} else {
		error = EINVAL;
	}

	/* Convert last-index to a count. */
	++*nsegs;
	CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
	    __func__, dmat, dmat->flags, error, *nsegs);

	return (error);

}
712
/*
 * Like bus_dmamap_load(), but for uios.  Not implemented on this
 * platform: any call panics.
 */
int
bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
    bus_dmamap_callback2_t *callback, void *callback_arg,
    int flags)
{

	panic("Unimplemented %s at %s:%d\n", __func__, __FILE__, __LINE__);
	return (0);
}
725
726/*
727 * Release the mapping held by map.
728 */
729void
730_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
731{
732
733	return;
734}
735
/*
 * Perform the data-cache maintenance required for one KVA range before
 * a DMA transfer.  Only PRE ops reach here; _bus_dmamap_sync() strips
 * the POST ops, which need no work (see the big comment there).
 */
static __inline void
bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
{

	switch (op) {
	case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
		/* Combined read+write prep collapses into one wbinv pass. */
		mips_dcache_wbinv_range((vm_offset_t)buf, len);
		break;

	case BUS_DMASYNC_PREREAD:
#if 1
		/*
		 * Write-back-invalidate rather than plain invalidate:
		 * per the comment in _bus_dmamap_sync(), a write-back may
		 * be needed when the buffer is not cache-line aligned.
		 */
		mips_dcache_wbinv_range((vm_offset_t)buf, len);
#else
		mips_dcache_inv_range((vm_offset_t)buf, len);
#endif
		break;

	case BUS_DMASYNC_PREWRITE:
		mips_dcache_wb_range((vm_offset_t)buf, len);
		break;
	}
}
758
/*
 * Synchronize CPU caches with the loaded buffer around a DMA transfer.
 * Dispatches on the buffer type recorded at load time (linear, mbuf
 * chain, or uio) and applies bus_dmamap_sync_buf() to each piece.
 */
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct mbuf *m;
	struct uio *uio;
	int resid;
	struct iovec *iov;


	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((op & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (op & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync: mix PRE and POST");

	/*
	 * Since we're dealing with a virtually-indexed, write-back
	 * cache, we need to do the following things:
	 *
	 *	PREREAD -- Invalidate D-cache.  Note we might have
	 *	to also write-back here if we have to use an Index
	 *	op, or if the buffer start/end is not cache-line aligned.
	 *
	 *	PREWRITE -- Write-back the D-cache.  If we have to use
	 *	an Index op, we also have to invalidate.  Note that if
	 *	we are doing PREREAD|PREWRITE, we can collapse everything
	 *	into a single op.
	 *
	 *	POSTREAD -- Nothing.
	 *
	 *	POSTWRITE -- Nothing.
	 */

	/*
	 * Flush the write buffer.
	 * XXX Is this always necessary?
	 */
	mips_wbflush();

	/* POST ops need no cache work (see above); done after the wbflush. */
	op &= (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	if (op == 0)
		return;

	CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags);
	switch(map->flags & DMAMAP_TYPE_MASK) {
	case DMAMAP_LINEAR:
		bus_dmamap_sync_buf(map->buffer, map->len, op);
		break;
	case DMAMAP_MBUF:
		/* Sync every non-empty mbuf in the chain. */
		m = map->buffer;
		while (m) {
			if (m->m_len > 0)
				bus_dmamap_sync_buf(m->m_data, m->m_len, op);
			m = m->m_next;
		}
		break;
	case DMAMAP_UIO:
		/* Sync each iovec, bounded by the uio's residual count. */
		uio = map->buffer;
		iov = uio->uio_iov;
		resid = uio->uio_resid;
		for (int i = 0; i < uio->uio_iovcnt && resid != 0; i++) {
			bus_size_t minlen = resid < iov[i].iov_len ? resid :
			    iov[i].iov_len;
			if (minlen > 0) {
				bus_dmamap_sync_buf(iov[i].iov_base, minlen, op);
				resid -= minlen;
			}
		}
		break;
	default:
		/* Map not loaded (no type bit set): nothing to sync. */
		break;
	}
}
833