/*	$NetBSD: bus_dma.c,v 1.48 2024/06/04 20:43:58 riastradh Exp $	*/

/*-
 * Copyright (c) 1997, 1998, 2001, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */

__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.48 2024/06/04 20:43:58 riastradh Exp $");

#define _MIPS_BUS_DMA_PRIVATE

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/evcnt.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <uvm/uvm.h>

#include <mips/cache.h>
#ifdef _LP64
#include <mips/mips3_pte.h>
#endif

#include <mips/locore.h>

const struct mips_bus_dmamap_ops mips_bus_dmamap_ops =
    _BUS_DMAMAP_OPS_INITIALIZER;
const struct mips_bus_dmamem_ops mips_bus_dmamem_ops =
    _BUS_DMAMEM_OPS_INITIALIZER;
const struct mips_bus_dmatag_ops mips_bus_dmatag_ops =
    _BUS_DMATAG_OPS_INITIALIZER;

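/*
 * Event counters for bus_dma activity; they show up in vmstat -e output.
 */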
static struct evcnt bus_dma_creates =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "creates");
static struct evcnt bus_dma_bounced_creates =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced creates");
static struct evcnt bus_dma_loads =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "loads");
static struct evcnt bus_dma_bounced_loads =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced loads");
static struct evcnt bus_dma_read_bounces =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "read bounces");
static struct evcnt bus_dma_write_bounces =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "write bounces");
static struct evcnt bus_dma_bounced_unloads =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced unloads");
static struct evcnt bus_dma_unloads =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "unloads");
static struct evcnt bus_dma_bounced_destroys =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "bounced destroys");
static struct evcnt bus_dma_destroys =
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "busdma", "destroys");

EVCNT_ATTACH_STATIC(bus_dma_creates);
EVCNT_ATTACH_STATIC(bus_dma_bounced_creates);
EVCNT_ATTACH_STATIC(bus_dma_loads);
EVCNT_ATTACH_STATIC(bus_dma_bounced_loads);
EVCNT_ATTACH_STATIC(bus_dma_read_bounces);
EVCNT_ATTACH_STATIC(bus_dma_write_bounces);
EVCNT_ATTACH_STATIC(bus_dma_unloads);
EVCNT_ATTACH_STATIC(bus_dma_bounced_unloads);
EVCNT_ATTACH_STATIC(bus_dma_destroys);
EVCNT_ATTACH_STATIC(bus_dma_bounced_destroys);

#define	STAT_INCR(x)	(bus_dma_ ## x.ev_count++)

paddr_t kvtophys(vaddr_t);	/* XXX */
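
/*
 * A rough sketch of the typical driver-side calling sequence through the
 * machine-independent bus_dma(9) interface, for reference only (error
 * handling omitted; "t", "map", "buf" and "buflen" are the caller's own
 * tag, map and buffer):
 *
 *	bus_dmamap_create(t, buflen, 1, buflen, 0, BUS_DMA_WAITOK, &map);
 *	bus_dmamap_load(t, map, buf, buflen, NULL, BUS_DMA_WAITOK);
 *	bus_dmamap_sync(t, map, 0, buflen, BUS_DMASYNC_PREWRITE);
 *	... start the device's DMA and wait for it to complete ...
 *	bus_dmamap_sync(t, map, 0, buflen, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(t, map);
 *	bus_dmamap_destroy(t, map);
 */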

/*
 * Utility function to load a linear buffer.  segp contains the starting
 * segment on entry, and the ending segment on exit.  first indicates
 * whether this is the first invocation of this function.  lastvaddr is
 * the virtual address just past the previously loaded chunk and is used
 * to decide whether a new chunk may be coalesced with it.
 */
static int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map,
    void *buf, bus_size_t buflen, struct vmspace *vm, int flags,
    int *segp, vaddr_t lastvaddr, bool first)
{
	paddr_t baddr, curaddr, lastaddr;
	vaddr_t vaddr = (vaddr_t)buf;
	bus_dma_segment_t *ds = &map->dm_segs[*segp];
	bus_dma_segment_t * const eds = &map->dm_segs[map->_dm_segcnt];
	const bus_addr_t bmask = ~(map->_dm_boundary - 1);
	const bool d_cache_coherent =
	    (mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT) != 0;

	lastaddr = ds->ds_addr + ds->ds_len;

	while (buflen > 0) {
		/*
		 * Get the physical address for this segment.
		 */
		if (!VMSPACE_IS_KERNEL_P(vm))
			(void) pmap_extract(vm_map_pmap(&vm->vm_map), vaddr,
			    &curaddr);
		else
			curaddr = kvtophys(vaddr);

		/*
		 * If we're beyond the current DMA window, indicate
		 * that and try to fall back onto something else.
		 */
		if (curaddr < t->_bounce_alloc_lo
		    || (t->_bounce_alloc_hi != 0
			&& curaddr >= t->_bounce_alloc_hi))
			return EINVAL;
#if BUS_DMA_DEBUG
		printf("dma: addr %#"PRIxPADDR" -> %#"PRIxPADDR"\n", curaddr,
		    (curaddr - t->_bounce_alloc_lo) + t->_wbase);
#endif
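		/*
		 * Translate the CPU physical address into a bus address
		 * within the DMA window.
		 */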
		curaddr = (curaddr - t->_bounce_alloc_lo) + t->_wbase;

		/*
		 * Compute the segment size, and adjust counts.
		 */
		bus_size_t sgsize = PAGE_SIZE - ((uintptr_t)vaddr & PGOFSET);
		if (sgsize > buflen) {
			sgsize = buflen;
		}
		if (sgsize > map->dm_maxsegsz) {
			sgsize = map->dm_maxsegsz;
		}

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > baddr - curaddr) {
				sgsize = baddr - curaddr;
			}
		}

		/*
		 * Insert chunk into a segment, coalescing with
		 * the previous segment if possible.
		 */
		if (!first
		    && curaddr == lastaddr
		    && (d_cache_coherent
#ifndef __mips_o32
			|| !MIPS_CACHE_VIRTUAL_ALIAS
#endif
			|| vaddr == lastvaddr)
		    && (ds->ds_len + sgsize) <= map->dm_maxsegsz
		    && (map->_dm_boundary == 0
			|| ((ds->ds_addr ^ curaddr) & bmask) == 0)) {
			ds->ds_len += sgsize;
		} else {
			if (!first && ++ds >= eds)
				break;
			ds->ds_addr = curaddr;
			ds->ds_len = sgsize;
			ds->_ds_vaddr = (intptr_t)vaddr;
			first = false;
			/*
			 * If this segment uses the correct color, try to see
			 * if we can use a direct-mapped VA for the segment.
			 */
			if (!mips_cache_badalias(curaddr, vaddr)) {
#ifdef __mips_o32
				if (MIPS_KSEG0_P(curaddr + sgsize - 1)) {
					ds->_ds_vaddr =
					    MIPS_PHYS_TO_KSEG0(curaddr);
				}
#else
				/*
				 * All physical addresses can be accessed
				 * via XKPHYS.
				 */
				ds->_ds_vaddr =
				    MIPS_PHYS_TO_XKPHYS_CACHED(curaddr);
#endif
			}
			/* Make sure this is a valid kernel address */
			KASSERTMSG(ds->_ds_vaddr < 0,
			    "_ds_vaddr %#"PRIxREGISTER, ds->_ds_vaddr);
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
		lastvaddr = vaddr;
	}

	*segp = ds - map->dm_segs;

	/*
	 * Did we fit?
	 */
	if (buflen != 0) {
		/*
		 * If there is a chained window, we will automatically
		 * fall back to it.
		 */
		return EFBIG;		/* XXX better return value here? */
	}

	return 0;
}

#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
static int _bus_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
    bus_size_t size, int flags);
static void _bus_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map);
static int _bus_dma_uiomove(void *buf, struct uio *uio, size_t n,
    int direction);

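/*
 * Load the map with our bounce buffer instead of the caller's buffer,
 * remembering the original buffer so that _bus_dmamap_sync() can copy
 * data between the two while bouncing.
 */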
static int
_bus_dma_load_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    size_t buflen, int buftype, int flags)
{
	struct mips_bus_dma_cookie * const cookie = map->_dm_cookie;
	struct vmspace * const vm = vmspace_kernel();
	int seg, error;

	KASSERT(cookie != NULL);
	KASSERT(cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE);

	/*
	 * Allocate bounce pages, if necessary.
	 */
	if ((cookie->id_flags & _BUS_DMA_HAS_BOUNCE) == 0) {
		error = _bus_dma_alloc_bouncebuf(t, map, buflen, flags);
		if (error)
			return error;
	}

	/*
	 * Cache a pointer to the caller's buffer and load the DMA map
	 * with the bounce buffer.
	 */
	cookie->id_origbuf = buf;
	cookie->id_origbuflen = buflen;
	cookie->id_buftype = buftype;
	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, cookie->id_bouncebuf,
	    buflen, vm, flags, &seg, 0, true);
	if (error)
		return error;

	STAT_INCR(bounced_loads);
	map->dm_mapsize = buflen;
	map->dm_nsegs = seg + 1;
	map->_dm_vmspace = vm;
	/*
	 * If our cache is coherent, then the map must be coherent too.
	 */
	if (mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT)
		map->_dm_flags |= _BUS_DMAMAP_COHERENT;

	/* ...so _bus_dmamap_sync() knows we're bouncing */
	cookie->id_flags |= _BUS_DMA_IS_BOUNCING;
	return 0;
}
#endif /* _MIPS_NEED_BUS_DMA_BOUNCE */

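/*
 * Size of a DMA map with room for nsegments segments; the map structure
 * itself already contains one bus_dma_segment_t.
 */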
static size_t
_bus_dmamap_mapsize(int const nsegments)
{
	KASSERT(nsegments > 0);
	return sizeof(struct mips_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
}

#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
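/*
 * Size of a bounce cookie with room for nsegments bounce-buffer segments
 * after the fixed header.
 */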
static size_t
_bus_dmamap_cookiesize(int const nsegments)
{
	KASSERT(nsegments > 0);
	return sizeof(struct mips_bus_dma_cookie) +
	    (sizeof(bus_dma_segment_t) * nsegments);
}
#endif

/*
 * Common function for DMA map creation.  May be called by bus-specific
 * DMA map creation functions.
 */
int
_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct mips_bus_dmamap *map;
	void *mapstore;
	const int allocflags =
	    ((flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP);

	int error = 0;

	/*
	 * Allocate and initialize the DMA map.  The end of the map
	 * is a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources,
	 * and they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence
	 * the (nsegments - 1).
	 */
	if ((mapstore = kmem_zalloc(_bus_dmamap_mapsize(nsegments),
	     allocflags)) == NULL)
		return ENOMEM;

	map = mapstore;
	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_bounce_thresh = t->_bounce_thresh;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->_dm_vmspace = NULL;
	map->dm_maxsegsz = maxsegsz;
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;

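	/*
	 * Set up a bounce cookie if this map might ever have to bounce:
	 * either the platform's _may_bounce hook asks for one, or the
	 * tag's bounce threshold lies below the end of available memory.
	 */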
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
	struct mips_bus_dma_cookie *cookie;
	int cookieflags;
	void *cookiestore;

	if (t->_bounce_thresh == 0 || _BUS_AVAIL_END <= t->_bounce_thresh - 1)
		map->_dm_bounce_thresh = 0;
	cookieflags = 0;

	if (t->_may_bounce != NULL) {
		error = (*t->_may_bounce)(t, map, flags, &cookieflags);
		if (error != 0)
			goto out;
	}

	if (map->_dm_bounce_thresh != 0)
		cookieflags |= _BUS_DMA_MIGHT_NEED_BOUNCE;

	if ((cookieflags & _BUS_DMA_MIGHT_NEED_BOUNCE) == 0) {
		STAT_INCR(creates);
		return 0;
	}

	/*
	 * Allocate our cookie.
	 */
	if ((cookiestore = kmem_zalloc(_bus_dmamap_cookiesize(nsegments),
		    allocflags)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	cookie = (struct mips_bus_dma_cookie *)cookiestore;
	cookie->id_flags = cookieflags;
	map->_dm_cookie = cookie;
	STAT_INCR(bounced_creates);

	error = _bus_dma_alloc_bouncebuf(t, map, size, flags);
 out:
	if (error)
		_bus_dmamap_destroy(t, map);
#else
	STAT_INCR(creates);
#endif /* _MIPS_NEED_BUS_DMA_BOUNCE */

	return error;
}

/*
 * Common function for DMA map destruction.  May be called by bus-specific
 * DMA map destruction functions.
 */
void
_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
	struct mips_bus_dma_cookie *cookie = map->_dm_cookie;

	/*
	 * Free any bounce pages this map might hold.
	 */
	if (cookie != NULL) {
		if (cookie->id_flags & _BUS_DMA_IS_BOUNCING)
			STAT_INCR(bounced_unloads);
		map->dm_nsegs = 0;
		if (cookie->id_flags & _BUS_DMA_HAS_BOUNCE)
			_bus_dma_free_bouncebuf(t, map);
		STAT_INCR(bounced_destroys);
		kmem_free(cookie, _bus_dmamap_cookiesize(map->_dm_segcnt));
	} else
#endif
	STAT_INCR(destroys);
	if (map->dm_nsegs > 0)
		STAT_INCR(unloads);
	kmem_free(map, _bus_dmamap_mapsize(map->_dm_segcnt));
}

/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.  Called by bus-specific DMA map load functions with the
 * OR value appropriate for indicating "direct-mapped" for that
 * chipset.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	int seg, error;
	struct vmspace *vm;

	if (map->dm_nsegs > 0) {
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
		struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
		if (cookie != NULL) {
			if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
				STAT_INCR(bounced_unloads);
				cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
			}
			cookie->id_buftype = _BUS_DMA_BUFTYPE_INVALID;
		} else
#endif
		STAT_INCR(unloads);
	}
	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return EINVAL;

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen,
	    vm, flags, &seg, 0, true);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
		map->_dm_vmspace = vm;

		STAT_INCR(loads);

		/*
		 * For linear buffers, we support marking the mapping
		 * as COHERENT.
		 *
		 * XXX Check TLB entries for cache-inhibit bits?
		 */
		if (mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT)
			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
		else if (MIPS_KSEG1_P(buf))
			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
#ifdef _LP64
		else if (MIPS_XKPHYS_P((vaddr_t)buf) &&
		    (MIPS_XKPHYS_TO_CCA((vaddr_t)buf) ==
			MIPS3_PG_TO_CCA(MIPS3_PG_UNCACHED)))
			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
#endif
		return 0;
	}
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
	struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
	if (cookie != NULL &&
	    (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE)) {
		error = _bus_dma_load_bouncebuf(t, map, buf, buflen,
		    _BUS_DMA_BUFTYPE_LINEAR, flags);
	}
#endif
	return error;
}

/*
 * Like _bus_dmamap_load(), but for mbufs.
 */
int
_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map,
    struct mbuf *m0, int flags)
{
	int seg, error;
	struct mbuf *m;
	struct vmspace *vm = vmspace_kernel();

	if (map->dm_nsegs > 0) {
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
		struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
		if (cookie != NULL) {
			if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
				STAT_INCR(bounced_unloads);
				cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
			}
			cookie->id_buftype = _BUS_DMA_BUFTYPE_INVALID;
		} else
#endif
		STAT_INCR(unloads);
	}

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("_bus_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return EINVAL;

	vaddr_t lastvaddr = 0;
	bool first = true;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len,
		    vm, flags, &seg, lastvaddr, first);
		first = false;
		lastvaddr = (vaddr_t)m->m_data + m->m_len;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
		map->_dm_vmspace = vm;		/* always kernel */
		/*
		 * If our cache is coherent, then the map must be coherent too.
		 */
		if (mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT)
			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
		return 0;
	}
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
	struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
	if (cookie != NULL &&
	    (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE)) {
		error = _bus_dma_load_bouncebuf(t, map, m0, m0->m_pkthdr.len,
		    _BUS_DMA_BUFTYPE_MBUF, flags);
	}
#endif /* _MIPS_NEED_BUS_DMA_BOUNCE */
	return error;
}

/*
 * Like _bus_dmamap_load(), but for uios.
 */
int
_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map,
    struct uio *uio, int flags)
{
	int seg, i, error;
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;

	if (map->dm_nsegs > 0) {
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
		struct mips_bus_dma_cookie *const cookie = map->_dm_cookie;
		if (cookie != NULL) {
			if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
				STAT_INCR(bounced_unloads);
				cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
			}
			cookie->id_buftype = _BUS_DMA_BUFTYPE_INVALID;
		} else
#endif
		STAT_INCR(unloads);
	}
	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	vaddr_t lastvaddr = 0;
	bool first = true;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
		    uio->uio_vmspace, flags, &seg, lastvaddr, first);
		first = false;
		lastvaddr = (vaddr_t)addr + minlen;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
		map->_dm_vmspace = uio->uio_vmspace;
		/*
		 * If our cache is coherent, then the map must be coherent too.
		 */
		if (mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT)
			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
		return 0;
	}
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
	struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
	if (cookie != NULL &&
	    (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE)) {
		error = _bus_dma_load_bouncebuf(t, map, uio, uio->uio_resid,
		    _BUS_DMA_BUFTYPE_UIO, flags);
	}
#endif
	return error;
}

/*
 * Like _bus_dmamap_load(), but for raw memory.
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

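	/*
	 * Raw memory has no KVA of its own, so point each segment at a
	 * direct-mapped address.  Use an uncached view only when the
	 * caller asked for BUS_DMA_COHERENT on a non-coherent CPU.
	 */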
	struct vmspace *const vm = vmspace_kernel();
	const bool coherent_p =
	    (mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT);
	const bool cached_p = coherent_p || (flags & BUS_DMA_COHERENT) == 0;
	bus_size_t mapsize = 0;
	vaddr_t lastvaddr = 0;
	bool first = true;
	int curseg = 0;
	int error = 0;

	for (; error == 0 && nsegs-- > 0; segs++) {
		void *kva;
#ifdef _LP64
		if (cached_p) {
			kva = (void *)MIPS_PHYS_TO_XKPHYS_CACHED(
			    segs->ds_addr);
		} else {
			kva = (void *)MIPS_PHYS_TO_XKPHYS_UNCACHED(
			    segs->ds_addr);
		}
#else
		if (segs->ds_addr >= MIPS_PHYS_MASK)
			return EFBIG;
		if (cached_p) {
			kva = (void *)MIPS_PHYS_TO_KSEG0(segs->ds_addr);
		} else {
			kva = (void *)MIPS_PHYS_TO_KSEG1(segs->ds_addr);
		}
#endif	/* _LP64 */
		mapsize += segs->ds_len;
		error = _bus_dmamap_load_buffer(t, map, kva, segs->ds_len,
		    vm, flags, &curseg, lastvaddr, first);
		first = false;
		lastvaddr = (vaddr_t)kva + segs->ds_len;
	}
	if (error == 0) {
		map->dm_mapsize = mapsize;
		map->dm_nsegs = curseg + 1;
		map->_dm_vmspace = vm;		/* always kernel */
		/*
		 * If our cache is coherent, then the map must be coherent too.
		 */
		if (coherent_p)
			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
		return 0;
	}
	/*
	 * If bus_dmamem_alloc returned memory that needs bouncing,
	 * that's a bug which we will not work around.
	 */
	return error;
}

/*
 * Common function for unloading a DMA map.  May be called by
 * chipset-specific DMA map unload functions.
 */
void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	if (map->dm_nsegs > 0) {
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
		struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
		if (cookie != NULL) {
			if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
				cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
				STAT_INCR(bounced_unloads);
			}
			cookie->id_buftype = _BUS_DMA_BUFTYPE_INVALID;
		} else
#endif

		STAT_INCR(unloads);
	}
	/*
	 * No resources to free; just mark the mappings as
	 * invalid.
	 */
	map->dm_maxsegsz = map->_dm_maxmaxsegsz;
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	map->_dm_flags &= ~_BUS_DMAMAP_COHERENT;
}

/*
 * Common function for DMA map synchronization.  May be called
 * by chipset-specific DMA map synchronization functions.
 *
 * This version works with the virtually-indexed, write-back cache
 * found in the MIPS-3/MIPS-4 CPUs available for the Algorithmics.
 */
void
_bus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	bus_size_t minlen;

#ifdef DIAGNOSTIC
	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("_bus_dmamap_sync: mix PRE and POST");

	if (offset >= map->dm_mapsize)
		panic("%s: bad offset 0x%jx >= 0x%jx", __func__,
		    (intmax_t)offset, (intmax_t)map->dm_mapsize);
	if ((offset + len) > map->dm_mapsize)
		panic("%s: bad length 0x%jx + 0x%jx > 0x%jx", __func__,
		    (intmax_t)offset, (intmax_t)len,
		    (intmax_t)map->dm_mapsize);
#endif

	/*
	 * Since we're dealing with a virtually-indexed, write-back
	 * cache, we need to do the following things:
	 *
	 *	PREREAD -- Invalidate D-cache.  Note we might have
	 *	to also write-back here if we have to use an Index
	 *	op, or if the buffer start/end is not cache-line aligned.
	 *
	 *	PREWRITE -- Write-back the D-cache.  If we have to use
	 *	an Index op, we also have to invalidate.  Note that if
	 *	we are doing PREREAD|PREWRITE, we can collapse everything
	 *	into a single op.
	 *
	 *	POSTREAD -- Nothing.
	 *
	 *	POSTWRITE -- Nothing.
	 */
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
	struct mips_bus_dma_cookie * const cookie = map->_dm_cookie;
	if (cookie != NULL && (cookie->id_flags & _BUS_DMA_IS_BOUNCING)
	    && (ops & BUS_DMASYNC_PREWRITE) && len != 0) {
		STAT_INCR(write_bounces);
		/*
		 * Copy the caller's buffer to the bounce buffer.
		 */
		switch (cookie->id_buftype) {
		case _BUS_DMA_BUFTYPE_LINEAR:
			memcpy((char *)cookie->id_bouncebuf + offset,
			    cookie->id_origlinearbuf + offset, len);
			break;
		case _BUS_DMA_BUFTYPE_MBUF:
			m_copydata(cookie->id_origmbuf, offset, len,
			    (char *)cookie->id_bouncebuf + offset);
			break;
		case _BUS_DMA_BUFTYPE_UIO:
			_bus_dma_uiomove((char *)cookie->id_bouncebuf + offset,
			    cookie->id_origuio, len, UIO_WRITE);
			break;
#ifdef DIAGNOSTIC
		case _BUS_DMA_BUFTYPE_RAW:
			panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_RAW");
			break;

		case _BUS_DMA_BUFTYPE_INVALID:
			panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_INVALID");
			break;

		default:
			panic("_bus_dmamap_sync: unknown buffer type %d\n",
			    cookie->id_buftype);
			break;
#endif /* DIAGNOSTIC */
		}
	}
#endif /* _MIPS_NEED_BUS_DMA_BOUNCE */

	/*
	 * Flush the write buffer.
	 * XXX Is this always necessary?
	 */
	wbflush();

	/*
	 * If the mapping is of COHERENT DMA-safe memory or this isn't a
	 * PREREAD or PREWRITE, no cache flush is necessary.  Check to see
	 * if we need to bounce it.
	 */
	if ((map->_dm_flags & _BUS_DMAMAP_COHERENT) ||
	    (ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) == 0)
		goto bounce_it;

#ifdef __mips_o32
	/*
	 * If the mapping belongs to the kernel, or it belongs
	 * to the currently-running process (XXX actually, vmspace),
	 * then we can use Hit ops.  Otherwise, Index ops.
	 *
	 * This should be true the vast majority of the time.
	 */
	const bool useindex = (!VMSPACE_IS_KERNEL_P(map->_dm_vmspace) &&
	    map->_dm_vmspace != curproc->p_vmspace);
#endif

	bus_dma_segment_t *seg = map->dm_segs;
	bus_dma_segment_t *const lastseg = seg + map->dm_nsegs;
	/*
	 * Skip segments until offset is within a segment.
	 */
	for (; offset >= seg->ds_len; seg++) {
		offset -= seg->ds_len;
	}

	for (; seg < lastseg && len != 0; seg++, offset = 0, len -= minlen) {
		/*
		 * Now at the first segment to sync; nail each segment until we
		 * have exhausted the length.
		 */
		register_t vaddr = seg->_ds_vaddr + offset;
		minlen = ulmin(len, seg->ds_len - offset);

#ifdef BUS_DMA_DEBUG
		printf("bus_dmamap_sync(ops=%d: flushing segment %p "
		    "(0x%"PRIxREGISTER"+%"PRIxBUSADDR
		    ", 0x%"PRIxREGISTER"+0x%"PRIxBUSADDR
		    ") (olen = %"PRIxBUSADDR")...", ops, seg,
		    vaddr - offset, offset,
		    vaddr - offset, offset + minlen - 1, len);
#endif

		/*
		 * If we are forced to use Index ops, it's always a
		 * Write-back,Invalidate, so just do one test.
		 */
#ifdef __mips_o32
		if (__predict_false(useindex || vaddr == 0)) {
			mips_dcache_wbinv_range_index(vaddr, minlen);
#ifdef BUS_DMA_DEBUG
			printf("\n");
#endif
			continue;
		}
#endif

		switch (ops) {
		case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
			mips_dcache_wbinv_range(vaddr, minlen);
			break;

		case BUS_DMASYNC_PREREAD: {
			struct mips_cache_info *const mci = &mips_cache_info;
			register_t start = vaddr;
			register_t end = vaddr + minlen;
			register_t preboundary, firstboundary, lastboundary;
			register_t mask = mci->mci_dcache_align_mask;

			preboundary = start & ~mask;
			firstboundary = (start + mask) & ~mask;
			lastboundary = end & ~mask;
			if (preboundary < start && preboundary < lastboundary)
				mips_dcache_wbinv_range(preboundary,
				    mci->mci_dcache_align);
			if (firstboundary < lastboundary)
				mips_dcache_inv_range(firstboundary,
				    lastboundary - firstboundary);
			if (lastboundary < end)
				mips_dcache_wbinv_range(lastboundary,
				    mci->mci_dcache_align);
			break;
		}

		case BUS_DMASYNC_PREWRITE:
			mips_dcache_wb_range(vaddr, minlen);
			break;
		}
#ifdef BUS_DMA_DEBUG
		printf("\n");
#endif
	}

  bounce_it:
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
	if ((ops & BUS_DMASYNC_POSTREAD) == 0 ||
	    cookie == NULL ||
	    (cookie->id_flags & _BUS_DMA_IS_BOUNCING) == 0 ||
	    len == 0)
		return;

	STAT_INCR(read_bounces);
	/*
	 * Copy the bounce buffer to the caller's buffer.
	 */
	switch (cookie->id_buftype) {
	case _BUS_DMA_BUFTYPE_LINEAR:
		memcpy(cookie->id_origlinearbuf + offset,
		    (char *)cookie->id_bouncebuf + offset, len);
		break;

	case _BUS_DMA_BUFTYPE_MBUF:
		m_copyback(cookie->id_origmbuf, offset, len,
		    (char *)cookie->id_bouncebuf + offset);
		break;

	case _BUS_DMA_BUFTYPE_UIO:
		_bus_dma_uiomove((char *)cookie->id_bouncebuf + offset,
		    cookie->id_origuio, len, UIO_READ);
		break;
#ifdef DIAGNOSTIC
	case _BUS_DMA_BUFTYPE_RAW:
		panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_RAW");
		break;

	case _BUS_DMA_BUFTYPE_INVALID:
		panic("_bus_dmamap_sync: _BUS_DMA_BUFTYPE_INVALID");
		break;

	default:
		panic("_bus_dmamap_sync: unknown buffer type %d\n",
		    cookie->id_buftype);
		break;
#endif
	}
#endif /* _MIPS_NEED_BUS_DMA_BOUNCE */
	return;
}

/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
int
_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	bus_addr_t high;

	if (t->_bounce_alloc_hi != 0 &&
	    _BUS_AVAIL_END > t->_bounce_alloc_hi - 1)
		high = t->_bounce_alloc_hi - 1;
	else
		high = _BUS_AVAIL_END;

	return _bus_dmamem_alloc_range(t, size, alignment, boundary,
	    segs, nsegs, rsegs, flags, t->_bounce_alloc_lo, high);
}

/*
 * Allocate physical memory from the given physical address range.
 * Called by DMA-safe memory allocation methods.
 */
int
_bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags, paddr_t low, paddr_t high)
{
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	struct pglist mlist;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate pages from the VM system.
	 */
	error = uvm_pglistalloc(size, low, high, alignment, boundary,
	    &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return error;

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = TAILQ_FIRST(&mlist);
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = TAILQ_NEXT(m, pageq.queue);

	for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
		curaddr = VM_PAGE_TO_PHYS(m);
#ifdef DIAGNOSTIC
		if (curaddr < low || curaddr >= high) {
			printf("uvm_pglistalloc returned non-sensical"
			    " address 0x%"PRIxPADDR"\n", curaddr);
			panic("_bus_dmamem_alloc");
		}
#endif
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return 0;
}

/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
		}
	}

	uvm_pglistfree(&mlist);
}

/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	/*
	 * If we're only mapping 1 segment, use a direct-mapped address
	 * (KSEG0/KSEG1 or XKPHYS) to avoid TLB thrashing.
	 */
#ifdef _LP64
	if (nsegs == 1) {
		if (((mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT)
			== 0) &&
		    (flags & BUS_DMA_COHERENT))
			*kvap = (void *)MIPS_PHYS_TO_XKPHYS_UNCACHED(
			    segs[0].ds_addr);
		else
			*kvap = (void *)MIPS_PHYS_TO_XKPHYS_CACHED(
			    segs[0].ds_addr);
		return 0;
	}
#else
	if ((nsegs == 1) && (segs[0].ds_addr < MIPS_PHYS_MASK)) {
		if (((mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT)
			== 0) &&
		    (flags & BUS_DMA_COHERENT))
			*kvap = (void *)MIPS_PHYS_TO_KSEG1(segs[0].ds_addr);
		else
			*kvap = (void *)MIPS_PHYS_TO_KSEG0(segs[0].ds_addr);
		return 0;
	}
#endif	/* _LP64 */

	size = round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);

	if (va == 0)
		return ENOMEM;

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = trunc_page(segs[curseg].ds_addr);
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
		}
	}
	pmap_update(pmap_kernel());

	return 0;
}

/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

#ifdef DIAGNOSTIC
	if ((uintptr_t)kva & PGOFSET)
		panic("_bus_dmamem_unmap: bad alignment on %p", kva);
#endif

	/*
	 * Nothing to do if we mapped it with KSEG0 or KSEG1 (i.e.
	 * not in KSEG2 or XKSEG).
	 */
	if (MIPS_KSEG0_P(kva) || MIPS_KSEG1_P(kva))
		return;
#ifdef _LP64
	if (MIPS_XKPHYS_P((vaddr_t)kva))
		return;
#endif

	size = round_page(size);
	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by
 * bus-specific DMA mmap(2)'ing functions.
 */
paddr_t
_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{
	int i;
	paddr_t pa;

	for (i = 0; i < nsegs; i++) {
#ifdef DIAGNOSTIC
		if (off & PGOFSET)
			panic("_bus_dmamem_mmap: offset unaligned");
		if (segs[i].ds_addr & PGOFSET)
			panic("_bus_dmamem_mmap: segment unaligned");
		if (segs[i].ds_len & PGOFSET)
			panic("_bus_dmamem_mmap: segment size not multiple"
			    " of page size");
#endif
		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		pa = (paddr_t)segs[i].ds_addr + off;

/*
 * This is for machines which use normal RAM as video memory, so userland can
 * mmap() it and treat it like device memory, which is normally uncached.
 * Needed for X11 on SGI O2, will likely be needed on things like CI20.
 */
#if defined(_MIPS_PADDR_T_64BIT) || defined(_LP64)
		if (flags & BUS_DMA_PREFETCHABLE)
			return mips_btop(pa | PGC_NOCACHE);
		else
			return mips_btop(pa);
#else
		return mips_btop(pa);
#endif
	}

	/* Page not found. */
	return -1;
}

#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
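/*
 * Allocate and map a bounce buffer of at least the given size and
 * record it in the map's cookie.
 */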
static int
_bus_dma_alloc_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map,
    bus_size_t size, int flags)
{
	struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
	int error = 0;

#ifdef DIAGNOSTIC
	if (cookie == NULL)
		panic("_bus_dma_alloc_bouncebuf: no cookie");
#endif

	cookie->id_bouncebuflen = round_page(size);
	error = _bus_dmamem_alloc(t, cookie->id_bouncebuflen,
	    PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
	    map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
	if (error)
		goto out;
	error = _bus_dmamem_map(t, cookie->id_bouncesegs,
	    cookie->id_nbouncesegs, cookie->id_bouncebuflen,
	    (void **)&cookie->id_bouncebuf, flags);

 out:
	if (error) {
		_bus_dmamem_free(t, cookie->id_bouncesegs,
		    cookie->id_nbouncesegs);
		cookie->id_bouncebuflen = 0;
		cookie->id_nbouncesegs = 0;
	} else {
		cookie->id_flags |= _BUS_DMA_HAS_BOUNCE;
	}

	return error;
}

static void
_bus_dma_free_bouncebuf(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct mips_bus_dma_cookie *cookie = map->_dm_cookie;

#ifdef DIAGNOSTIC
	if (cookie == NULL)
		panic("_bus_dma_free_bouncebuf: no cookie");
#endif

	_bus_dmamem_unmap(t, cookie->id_bouncebuf, cookie->id_bouncebuflen);
	_bus_dmamem_free(t, cookie->id_bouncesegs, cookie->id_nbouncesegs);
	cookie->id_bouncebuflen = 0;
	cookie->id_nbouncesegs = 0;
	cookie->id_flags &= ~_BUS_DMA_HAS_BOUNCE;
}

/*
 * This function does the same as uiomove, but takes an explicit
 * direction, and does not update the uio structure.
 */
static int
_bus_dma_uiomove(void *buf, struct uio *uio, size_t n, int direction)
{
	struct iovec *iov;
	int error;
	struct vmspace *vm;
	char *cp;
	size_t resid, cnt;
	int i;

	iov = uio->uio_iov;
	vm = uio->uio_vmspace;
	cp = buf;
	resid = n;

	for (i = 0; i < uio->uio_iovcnt && resid > 0; i++) {
		iov = &uio->uio_iov[i];
		if (iov->iov_len == 0)
			continue;
		cnt = MIN(resid, iov->iov_len);

		if (!VMSPACE_IS_KERNEL_P(vm)) {
			preempt_point();
		}
		if (direction == UIO_READ) {
			error = copyout_vmspace(vm, cp, iov->iov_base, cnt);
		} else {
			error = copyin_vmspace(vm, iov->iov_base, cp, cnt);
		}
		if (error)
			return error;
		cp += cnt;
		resid -= cnt;
	}
	return 0;
}
#endif /* _MIPS_NEED_BUS_DMA_BOUNCE */

int
_bus_dmatag_subregion(bus_dma_tag_t tag, bus_addr_t min_addr,
    bus_addr_t max_addr, bus_dma_tag_t *newtag, int flags)
{

#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
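	/*
	 * If the parent tag already constrains DMA to the requested
	 * range, just reuse it instead of allocating a new tag.
	 */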
	if (((tag->_bounce_thresh != 0 &&
		    max_addr >= tag->_bounce_thresh - 1 &&
		    tag->_bounce_alloc_hi != 0 &&
		    max_addr >= tag->_bounce_alloc_hi - 1) ||
		(tag->_bounce_alloc_hi == 0 && max_addr > _BUS_AVAIL_END)) &&
	    (min_addr <= tag->_bounce_alloc_lo)) {
		*newtag = tag;
		/* if the tag must be freed, add a reference */
		if (tag->_tag_needs_free)
			tag->_tag_needs_free++;
		return 0;
	}

	if ((*newtag = kmem_alloc(sizeof(struct mips_bus_dma_tag),
		    (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
		return ENOMEM;

	**newtag = *tag;
	(*newtag)->_tag_needs_free = 1;

	if (tag->_bounce_thresh == 0 || max_addr < tag->_bounce_thresh)
		(*newtag)->_bounce_thresh = max_addr;
	if (tag->_bounce_alloc_hi == 0 || max_addr < tag->_bounce_alloc_hi)
		(*newtag)->_bounce_alloc_hi = max_addr;
	if (min_addr > tag->_bounce_alloc_lo)
		(*newtag)->_bounce_alloc_lo = min_addr;
	(*newtag)->_wbase +=
	    (*newtag)->_bounce_alloc_lo - tag->_bounce_alloc_lo;

	return 0;
#else
	return EOPNOTSUPP;
#endif /* _MIPS_NEED_BUS_DMA_BOUNCE */
}

void
_bus_dmatag_destroy(bus_dma_tag_t tag)
{
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
	switch (tag->_tag_needs_free) {
	case 0:
		break;				/* not allocated with malloc */
	case 1:
		kmem_free(tag, sizeof(*tag));	/* last reference to tag */
		break;
	default:
		tag->_tag_needs_free--;		/* one less reference */
	}
#endif
}
