/*-
 * Copyright (c) 1999, 2000 Matthew R. Green
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: NetBSD: iommu.c,v 1.82 2008/05/30 02:29:37 mrg Exp
 */
/*-
 * Copyright (c) 1999-2002 Eduardo Horvath
 * Copyright (c) 2001-2003 Thomas Moestl
 * Copyright (c) 2007, 2009 Marius Strobl <marius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: NetBSD: sbus.c,v 1.50 2002/06/20 18:26:24 eeh Exp
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * UltraSPARC IOMMU support; used by both the PCI and SBus code.
 *
 * TODO:
 * - Support sub-page boundaries.
 * - Fix alignment handling for small allocations (the possible page offset
 *   of malloc()ed memory is not handled at all).  Revise interaction of
 *   alignment with the load_mbuf and load_uio functions.
 * - Handle lowaddr and highaddr in some way, and try to work out a way
 *   for filter callbacks to work.  Currently, only lowaddr is honored
 *   in that no addresses above it are considered at all.
 * - Implement BUS_DMA_ALLOCNOW in bus_dma_tag_create as far as possible.
 * - Check the possible return values and callback error arguments;
 *   the callback currently gets called in error conditions where it should
 *   not be.
 * - When running out of DVMA space, return EINPROGRESS in the non-
 *   BUS_DMA_NOWAIT case and delay the callback until sufficient space
 *   becomes available.
 */

#include "opt_iommu.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <machine/asi.h>
#include <machine/bus.h>
#include <machine/bus_private.h>
#include <machine/iommureg.h>
#include <machine/pmap.h>
#include <machine/resource.h>
#include <machine/ver.h>

#include <sys/rman.h>

#include <machine/iommuvar.h>

/*
 * Tuning constants
 */
#define	IOMMU_MAX_PRE		(32 * 1024)
#define	IOMMU_MAX_PRE_SEG	3

/* Threshold for using the streaming buffer */
#define	IOMMU_STREAM_THRESH	128
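
/*
 * IOMMU_MAX_PRE and IOMMU_MAX_PRE_SEG bound the amount of DVMA space and the
 * number of additional segments preallocated in iommu_dvmamap_create();
 * transfers of at least IOMMU_STREAM_THRESH bytes on non-coherent maps are
 * routed through the streaming buffer (see iommu_use_streaming()).
 */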

static MALLOC_DEFINE(M_IOMMU, "dvmamem", "IOMMU DVMA Buffers");

static	int iommu_strbuf_flush_sync(struct iommu_state *);
#ifdef IOMMU_DIAG
static	void iommu_diag(struct iommu_state *, vm_offset_t va);
#endif

/*
 * Helpers
 */
#define	IOMMU_READ8(is, reg, off)					\
	bus_space_read_8((is)->is_bustag, (is)->is_bushandle,		\
	    (is)->reg + (off))
#define	IOMMU_WRITE8(is, reg, off, v)					\
	bus_space_write_8((is)->is_bustag, (is)->is_bushandle,		\
	    (is)->reg + (off), (v))

#define	IOMMU_HAS_SB(is)						\
	((is)->is_sb[0] != 0 || (is)->is_sb[1] != 0)

/*
 * Always overallocate one page; this is needed to handle alignment of the
 * buffer, so it makes sense to use a lazy allocation scheme.
 */
#define	IOMMU_SIZE_ROUNDUP(sz)						\
	(round_io_page(sz) + IO_PAGE_SIZE)

#define	IOMMU_SET_TTE(is, va, tte)					\
	((is)->is_tsb[IOTSBSLOT(va)] = (tte))
#define	IOMMU_GET_TTE(is, va)						\
	(is)->is_tsb[IOTSBSLOT(va)]
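
/*
 * The TSB is a flat array of 8-byte TTEs indexed by IOTSBSLOT(va).  Each DVMA
 * I/O page has exactly one slot, which is owned by the map the page was
 * allocated to, so the TTE accessors above need no locking.
 */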

/* Resource helpers */
#define	IOMMU_RES_START(res)						\
	((bus_addr_t)rman_get_start(res) << IO_PAGE_SHIFT)
#define	IOMMU_RES_END(res)						\
	((bus_addr_t)(rman_get_end(res) + 1) << IO_PAGE_SHIFT)
#define	IOMMU_RES_SIZE(res)						\
	((bus_size_t)rman_get_size(res) << IO_PAGE_SHIFT)

/* Helpers for struct bus_dmamap_res */
#define	BDR_START(r)	IOMMU_RES_START((r)->dr_res)
#define	BDR_END(r)	IOMMU_RES_END((r)->dr_res)
#define	BDR_SIZE(r)	IOMMU_RES_SIZE((r)->dr_res)
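
/*
 * The DVMA rman hands out resources in units of whole I/O pages, so the
 * resource start, end and size values have to be shifted by IO_PAGE_SHIFT to
 * convert them to bus addresses, as the macros above do.
 */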

/* Locking macros */
#define	IS_LOCK(is)	mtx_lock(&is->is_mtx)
#define	IS_LOCK_ASSERT(is)	mtx_assert(&is->is_mtx, MA_OWNED)
#define	IS_UNLOCK(is)	mtx_unlock(&is->is_mtx)

/* Flush a page from the TLB.  No locking required, since this is atomic. */
static __inline void
iommu_tlb_flush(struct iommu_state *is, bus_addr_t va)
{

	if ((is->is_flags & IOMMU_FIRE) != 0)
		/*
		 * Direct page flushing is not supported and also not
		 * necessary due to cache snooping.
		 */
		return;
	IOMMU_WRITE8(is, is_iommu, IMR_FLUSH, va);
}

/*
 * Flush a page from the streaming buffer.  No locking required, since this
 * is atomic.
 */
static __inline void
iommu_strbuf_flushpg(struct iommu_state *is, bus_addr_t va)
{
	int i;

	for (i = 0; i < 2; i++)
		if (is->is_sb[i] != 0)
			IOMMU_WRITE8(is, is_sb[i], ISR_PGFLUSH, va);
}

/*
 * Flush an address from the streaming buffer(s); this is an asynchronous
 * operation.  To make sure that it has completed, iommu_strbuf_sync() needs
 * to be called.  No locking required.
 */
static __inline void
iommu_strbuf_flush(struct iommu_state *is, bus_addr_t va)
{

	iommu_strbuf_flushpg(is, va);
}

/* Synchronize all outstanding flush operations. */
static __inline void
iommu_strbuf_sync(struct iommu_state *is)
{

	IS_LOCK_ASSERT(is);
	iommu_strbuf_flush_sync(is);
}

/* LRU queue handling for lazy resource allocation. */
static __inline void
iommu_map_insq(struct iommu_state *is, bus_dmamap_t map)
{

	IS_LOCK_ASSERT(is);
	if (!SLIST_EMPTY(&map->dm_reslist)) {
		if (map->dm_onq)
			TAILQ_REMOVE(&is->is_maplruq, map, dm_maplruq);
		TAILQ_INSERT_TAIL(&is->is_maplruq, map, dm_maplruq);
		map->dm_onq = 1;
	}
}

static __inline void
iommu_map_remq(struct iommu_state *is, bus_dmamap_t map)
{

	IS_LOCK_ASSERT(is);
	if (map->dm_onq)
		TAILQ_REMOVE(&is->is_maplruq, map, dm_maplruq);
	map->dm_onq = 0;
}
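
/*
 * Maps holding DVMA resources are kept on is_maplruq in LRU order; when
 * iommu_dvma_vallocseg() runs out of DVMA space, it walks this queue from the
 * front and reclaims unused resources via iommu_dvma_vprune().
 */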

/*
 * initialise the UltraSPARC IOMMU (PCI or SBus):
 *	- allocate and setup the iotsb.
 *	- enable the IOMMU
 *	- initialise the streaming buffers (if they exist)
 *	- create a private DVMA map.
 */
void
iommu_init(const char *name, struct iommu_state *is, u_int tsbsize,
    uint32_t iovabase, u_int resvpg)
{
	vm_size_t size;
	vm_offset_t offs;
	uint64_t end, obpmap, obpptsb, tte;
	u_int maxtsbsize, obptsbentries, obptsbsize, slot, tsbentries;
	int i;

	/*
	 * Setup the IOMMU.
	 *
	 * The sun4u IOMMU is part of the PCI or SBus controller so we
	 * will deal with it here.
	 *
	 * The IOMMU address space always ends at 0xffffe000, but the starting
	 * address depends on the size of the map.  The map size is 1024 * 2 ^
	 * is->is_tsbsize entries, where each entry is 8 bytes.  The start of
	 * the map can be calculated by (0xffffe000 << (8 + is->is_tsbsize)).
	 */
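	/*
	 * For example, a TSB size code of 7 yields 1024 * 2^7 = 128K
	 * entries; with the 8KB I/O pages used here that corresponds to
	 * 1GB of DVMA space.
	 */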
	if ((is->is_flags & IOMMU_FIRE) != 0) {
		maxtsbsize = IOMMU_TSB512K;
		/*
		 * We enable bypass in order to be able to use a physical
		 * address for the event queue base.
		 */
		is->is_cr = IOMMUCR_SE | IOMMUCR_CM_C_TLB_TBW | IOMMUCR_BE;
	} else {
		maxtsbsize = IOMMU_TSB128K;
		is->is_cr = (tsbsize << IOMMUCR_TSBSZ_SHIFT) | IOMMUCR_DE;
	}
	if (tsbsize > maxtsbsize)
		panic("%s: unsupported TSB size", __func__);
	tsbentries = IOMMU_TSBENTRIES(tsbsize);
	is->is_cr |= IOMMUCR_EN;
	is->is_tsbsize = tsbsize;
	is->is_dvmabase = iovabase;
	if (iovabase == -1)
		is->is_dvmabase = IOTSB_VSTART(is->is_tsbsize);

	size = IOTSB_BASESZ << is->is_tsbsize;
	printf("%s: DVMA map: %#lx to %#lx %d entries%s\n", name,
	    is->is_dvmabase, is->is_dvmabase +
	    (size << (IO_PAGE_SHIFT - IOTTE_SHIFT)) - 1, tsbentries,
	    IOMMU_HAS_SB(is) ? ", streaming buffer" : "");

	/*
	 * Set up resource management.
	 */
	mtx_init(&is->is_mtx, "iommu", NULL, MTX_DEF);
	end = is->is_dvmabase + (size << (IO_PAGE_SHIFT - IOTTE_SHIFT));
	is->is_dvma_rman.rm_type = RMAN_ARRAY;
	is->is_dvma_rman.rm_descr = "DVMA Memory";
	if (rman_init(&is->is_dvma_rman) != 0 ||
	    rman_manage_region(&is->is_dvma_rman,
	    (is->is_dvmabase >> IO_PAGE_SHIFT) + resvpg,
	    (end >> IO_PAGE_SHIFT) - 1) != 0)
		panic("%s: could not initialize DVMA rman", __func__);
	TAILQ_INIT(&is->is_maplruq);

	/*
	 * Allocate memory for I/O page tables.  They need to be
	 * physically contiguous.
	 */
	is->is_tsb = contigmalloc(size, M_DEVBUF, M_NOWAIT, 0, ~0UL,
	    PAGE_SIZE, 0);
	if (is->is_tsb == NULL)
		panic("%s: contigmalloc failed", __func__);
	is->is_ptsb = pmap_kextract((vm_offset_t)is->is_tsb);
	bzero(is->is_tsb, size);

	/*
	 * Add the PROM mappings to the kernel IOTSB if desired.
	 * Note that the firmware of certain Darwin boards doesn't set
	 * the TSB size correctly.
	 */
	if ((is->is_flags & IOMMU_FIRE) != 0)
		obptsbsize = (IOMMU_READ8(is, is_iommu, IMR_TSB) &
		    IOMMUTB_TSBSZ_MASK) >> IOMMUTB_TSBSZ_SHIFT;
	else
		obptsbsize = (IOMMU_READ8(is, is_iommu, IMR_CTL) &
		    IOMMUCR_TSBSZ_MASK) >> IOMMUCR_TSBSZ_SHIFT;
	obptsbentries = IOMMU_TSBENTRIES(obptsbsize);
	if (bootverbose)
		printf("%s: PROM IOTSB size: %d (%d entries)\n", name,
		    obptsbsize, obptsbentries);
	if ((is->is_flags & IOMMU_PRESERVE_PROM) != 0 &&
	    !(PCPU_GET(impl) == CPU_IMPL_ULTRASPARCIIi && obptsbsize == 7)) {
		if (obptsbentries > tsbentries)
			panic("%s: PROM IOTSB entries exceed kernel",
			    __func__);
		obpptsb = IOMMU_READ8(is, is_iommu, IMR_TSB) &
		    IOMMUTB_TB_MASK;
		for (i = 0; i < obptsbentries; i++) {
			tte = ldxa(obpptsb + i * 8, ASI_PHYS_USE_EC);
			if ((tte & IOTTE_V) == 0)
				continue;
			slot = tsbentries - obptsbentries + i;
			if (bootverbose)
				printf("%s: adding PROM IOTSB slot %d "
				    "(kernel slot %d) TTE: %#lx\n", name,
				    i, slot, tte);
			obpmap = (is->is_dvmabase + slot * IO_PAGE_SIZE) >>
			    IO_PAGE_SHIFT;
			if (rman_reserve_resource(&is->is_dvma_rman, obpmap,
			    obpmap, IO_PAGE_SIZE >> IO_PAGE_SHIFT, RF_ACTIVE,
			    NULL) == NULL)
				panic("%s: could not reserve PROM IOTSB slot "
				    "%d (kernel slot %d)", __func__, i, slot);
			is->is_tsb[slot] = tte;
		}
	}

	/*
	 * Initialize streaming buffer, if it is there.
	 */
	if (IOMMU_HAS_SB(is)) {
		/*
		 * Find two 64-byte blocks in is_flush that are aligned on
		 * a 64-byte boundary for flushing.
		 */
		offs = roundup2((vm_offset_t)is->is_flush,
		    STRBUF_FLUSHSYNC_NBYTES);
		for (i = 0; i < 2; i++, offs += STRBUF_FLUSHSYNC_NBYTES) {
			is->is_flushva[i] = (uint64_t *)offs;
			is->is_flushpa[i] = pmap_kextract(offs);
		}
	}

	/*
	 * Now actually start up the IOMMU.
	 */
	iommu_reset(is);
}

/*
 * Streaming buffers don't exist on the UltraSPARC IIi; we should have
 * detected that already and disabled them.  If not, we will notice that
 * they aren't there when the STRBUF_EN bit does not remain set.
 */
void
iommu_reset(struct iommu_state *is)
{
	uint64_t tsb;
	int i;

	tsb = is->is_ptsb;
	if ((is->is_flags & IOMMU_FIRE) != 0) {
		tsb |= is->is_tsbsize;
		IOMMU_WRITE8(is, is_iommu, IMR_CACHE_INVAL, ~0ULL);
	}
	IOMMU_WRITE8(is, is_iommu, IMR_TSB, tsb);
	IOMMU_WRITE8(is, is_iommu, IMR_CTL, is->is_cr);

	for (i = 0; i < 2; i++) {
		if (is->is_sb[i] != 0) {
			IOMMU_WRITE8(is, is_sb[i], ISR_CTL, STRBUF_EN |
			    ((is->is_flags & IOMMU_RERUN_DISABLE) != 0 ?
			    STRBUF_RR_DIS : 0));

			/* No streaming buffers?  Disable them. */
			if ((IOMMU_READ8(is, is_sb[i], ISR_CTL) &
			    STRBUF_EN) == 0)
				is->is_sb[i] = 0;
		}
	}

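	/*
	 * Read the control register back, presumably to ensure that the
	 * register writes above have reached the hardware.
	 */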
	(void)IOMMU_READ8(is, is_iommu, IMR_CTL);
}

/*
 * Enter a mapping into the TSB.  No locking required, since each TSB slot is
 * uniquely assigned to a single map.
 */
static void
iommu_enter(struct iommu_state *is, vm_offset_t va, vm_paddr_t pa,
    int stream, int flags)
{
	uint64_t tte;

	KASSERT(va >= is->is_dvmabase,
	    ("%s: va %#lx not in DVMA space", __func__, va));
	KASSERT(pa <= is->is_pmaxaddr,
	    ("%s: XXX: physical address too large (%#lx)", __func__, pa));

	tte = MAKEIOTTE(pa, !(flags & BUS_DMA_NOWRITE),
	    !(flags & BUS_DMA_NOCACHE), stream);

	IOMMU_SET_TTE(is, va, tte);
	iommu_tlb_flush(is, va);
#ifdef IOMMU_DIAG
	IS_LOCK(is);
	iommu_diag(is, va);
	IS_UNLOCK(is);
#endif
}

/*
 * Remove mappings created by iommu_enter().  Flush the streaming buffer,
 * but do not synchronize it.  Returns whether a streaming buffer flush
 * was performed.
 */
static int
iommu_remove(struct iommu_state *is, vm_offset_t va, vm_size_t len)
{
	int slot, streamed = 0;

#ifdef IOMMU_DIAG
	iommu_diag(is, va);
#endif

	KASSERT(va >= is->is_dvmabase,
	    ("%s: va 0x%lx not in DVMA space", __func__, (u_long)va));
	KASSERT(va + len >= va,
	    ("%s: va 0x%lx + len 0x%lx wraps", __func__, (long)va, (long)len));

	va = trunc_io_page(va);
	while (len > 0) {
		if ((IOMMU_GET_TTE(is, va) & IOTTE_STREAM) != 0) {
			streamed = 1;
			iommu_strbuf_flush(is, va);
		}
		len -= ulmin(len, IO_PAGE_SIZE);
		IOMMU_SET_TTE(is, va, 0);
		iommu_tlb_flush(is, va);
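		/*
		 * IOMMUs that require explicit TTE cache flushes
		 * (IOMMU_FLUSH_CACHE) apparently flush a 64-byte chunk of
		 * eight TTEs at a time, so one flush per eight slots (or
		 * at the end of the range) is sufficient.
		 */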
		if ((is->is_flags & IOMMU_FLUSH_CACHE) != 0) {
			slot = IOTSBSLOT(va);
			if (len <= IO_PAGE_SIZE || slot % 8 == 7)
				IOMMU_WRITE8(is, is_iommu, IMR_CACHE_FLUSH,
				    is->is_ptsb + slot * 8);
		}
		va += IO_PAGE_SIZE;
	}
	return (streamed);
}

/* Decode an IOMMU fault for host bridge error handlers. */
void
iommu_decode_fault(struct iommu_state *is, vm_offset_t phys)
{
	bus_addr_t va;
	long idx;

	idx = phys - is->is_ptsb;
	if (phys < is->is_ptsb ||
	    idx > (PAGE_SIZE << is->is_tsbsize))
		return;
	va = is->is_dvmabase +
	    (((bus_addr_t)idx >> IOTTE_SHIFT) << IO_PAGE_SHIFT);
	printf("IOMMU fault virtual address %#lx\n", (u_long)va);
}

/*
 * A barrier operation which makes sure that all previous streaming buffer
 * flushes complete before it returns.
 */
static int
iommu_strbuf_flush_sync(struct iommu_state *is)
{
	struct timeval cur, end;
	int i;

	IS_LOCK_ASSERT(is);
	if (!IOMMU_HAS_SB(is))
		return (0);

	/*
	 * Streaming buffer flushes:
	 *
	 *   1 Tell strbuf to flush by storing va to strbuf_pgflush.  If
	 *     we're not on a cache line boundary (64-bits):
	 *   2 Store 0 in flag
	 *   3 Store pointer to flag in flushsync
	 *   4 wait till flushsync becomes 0x1
	 *
	 * If it takes more than .5 sec, something went wrong.
	 */
	*is->is_flushva[0] = 1;
	*is->is_flushva[1] = 1;
	membar(StoreStore);
	for (i = 0; i < 2; i++) {
		if (is->is_sb[i] != 0) {
			*is->is_flushva[i] = 0;
			IOMMU_WRITE8(is, is_sb[i], ISR_FLUSHSYNC,
			    is->is_flushpa[i]);
		}
	}

	microuptime(&cur);
	end.tv_sec = 0;
	/*
	 * 0.5s is the recommended timeout from the U2S manual.  The actual
	 * time required should be smaller by at least a factor of 1000.
	 * We have no choice but to busy-wait.
	 */
	end.tv_usec = 500000;
	timevaladd(&end, &cur);

	while ((!*is->is_flushva[0] || !*is->is_flushva[1]) &&
	    timevalcmp(&cur, &end, <=))
		microuptime(&cur);

	if (!*is->is_flushva[0] || !*is->is_flushva[1]) {
		panic("%s: flush timeout %ld, %ld at %#lx", __func__,
		    *is->is_flushva[0], *is->is_flushva[1], is->is_flushpa[0]);
	}

	return (1);
}

/* Determine whether we may enable streaming on a mapping. */
static __inline int
iommu_use_streaming(struct iommu_state *is, bus_dmamap_t map, bus_size_t size)
{

	return (size >= IOMMU_STREAM_THRESH && IOMMU_HAS_SB(is) &&
	    (map->dm_flags & DMF_COHERENT) == 0);
}

/*
 * Allocate DVMA virtual memory for a map.  The map must not be on a queue,
 * so that it can be freely modified.
 */
static int
iommu_dvma_valloc(bus_dma_tag_t t, struct iommu_state *is, bus_dmamap_t map,
    bus_size_t size)
{
	struct resource *res;
	struct bus_dmamap_res *bdr;
	bus_size_t align, sgsize;

	KASSERT(!map->dm_onq, ("%s: map on queue!", __func__));
	if ((bdr = malloc(sizeof(*bdr), M_IOMMU, M_NOWAIT)) == NULL)
		return (EAGAIN);
	/*
	 * If a boundary is specified, a map cannot be larger than it; however
	 * we do not clip currently, as that does not play well with the lazy
	 * allocation code.
	 * Alignment to a page boundary is always enforced.
	 */
	align = (t->dt_alignment + IO_PAGE_MASK) >> IO_PAGE_SHIFT;
	sgsize = round_io_page(size) >> IO_PAGE_SHIFT;
	if (t->dt_boundary > 0 && t->dt_boundary < IO_PAGE_SIZE)
		panic("%s: illegal boundary specified", __func__);
	res = rman_reserve_resource_bound(&is->is_dvma_rman, 0L,
	    t->dt_lowaddr >> IO_PAGE_SHIFT, sgsize,
	    t->dt_boundary >> IO_PAGE_SHIFT,
	    RF_ACTIVE | rman_make_alignment_flags(align), NULL);
	if (res == NULL) {
		free(bdr, M_IOMMU);
		return (ENOMEM);
	}

	bdr->dr_res = res;
	bdr->dr_used = 0;
	SLIST_INSERT_HEAD(&map->dm_reslist, bdr, dr_link);
	return (0);
}

/* Unload the map and mark all resources as unused, but do not free them. */
static void
iommu_dvmamap_vunload(struct iommu_state *is, bus_dmamap_t map)
{
	struct bus_dmamap_res *r;
	int streamed = 0;

	IS_LOCK_ASSERT(is);	/* for iommu_strbuf_sync() below */
	SLIST_FOREACH(r, &map->dm_reslist, dr_link) {
		streamed |= iommu_remove(is, BDR_START(r), r->dr_used);
		r->dr_used = 0;
	}
	if (streamed)
		iommu_strbuf_sync(is);
}

/* Free a DVMA virtual memory resource. */
static __inline void
iommu_dvma_vfree_res(bus_dmamap_t map, struct bus_dmamap_res *r)
{

	KASSERT(r->dr_used == 0, ("%s: resource busy!", __func__));
	if (r->dr_res != NULL && rman_release_resource(r->dr_res) != 0)
		printf("warning: DVMA space lost\n");
	SLIST_REMOVE(&map->dm_reslist, r, bus_dmamap_res, dr_link);
	free(r, M_IOMMU);
}

/* Free all DVMA virtual memory for a map. */
static void
iommu_dvma_vfree(struct iommu_state *is, bus_dmamap_t map)
{

	IS_LOCK(is);
	iommu_map_remq(is, map);
	iommu_dvmamap_vunload(is, map);
	IS_UNLOCK(is);
	while (!SLIST_EMPTY(&map->dm_reslist))
		iommu_dvma_vfree_res(map, SLIST_FIRST(&map->dm_reslist));
}

/* Prune a map, freeing all unused DVMA resources. */
static bus_size_t
iommu_dvma_vprune(struct iommu_state *is, bus_dmamap_t map)
{
	struct bus_dmamap_res *r, *n;
	bus_size_t freed = 0;

	IS_LOCK_ASSERT(is);
	for (r = SLIST_FIRST(&map->dm_reslist); r != NULL; r = n) {
		n = SLIST_NEXT(r, dr_link);
		if (r->dr_used == 0) {
			freed += BDR_SIZE(r);
			iommu_dvma_vfree_res(map, r);
		}
	}
	if (SLIST_EMPTY(&map->dm_reslist))
		iommu_map_remq(is, map);
	return (freed);
}

/*
 * Try to find a suitably-sized (and if requested, -aligned) slab of DVMA
 * memory with IO page offset voffs.
 */
static bus_addr_t
iommu_dvma_vfindseg(bus_dmamap_t map, vm_offset_t voffs, bus_size_t size,
    bus_addr_t amask)
{
	struct bus_dmamap_res *r;
	bus_addr_t dvmaddr, dvmend;

	KASSERT(!map->dm_onq, ("%s: map on queue!", __func__));
	SLIST_FOREACH(r, &map->dm_reslist, dr_link) {
		dvmaddr = round_io_page(BDR_START(r) + r->dr_used);
		/* Alignment can only work with voffs == 0. */
		dvmaddr = (dvmaddr + amask) & ~amask;
		dvmaddr += voffs;
		dvmend = dvmaddr + size;
		if (dvmend <= BDR_END(r)) {
			r->dr_used = dvmend - BDR_START(r);
			return (dvmaddr);
		}
	}
	return (0);
}
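
/*
 * Note that dr_used acts as a bump allocator pointer within each resource:
 * space is only handed out from the low end and is reclaimed wholesale when
 * the map is unloaded (iommu_dvmamap_vunload() resets dr_used to 0).
 */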

/*
 * Try to find or allocate a slab of DVMA space; see above.
 */
static int
iommu_dvma_vallocseg(bus_dma_tag_t dt, struct iommu_state *is, bus_dmamap_t map,
    vm_offset_t voffs, bus_size_t size, bus_addr_t amask, bus_addr_t *addr)
{
	bus_dmamap_t tm, last;
	bus_addr_t dvmaddr, freed;
	int error, complete = 0;

	dvmaddr = iommu_dvma_vfindseg(map, voffs, size, amask);

	/* Need to allocate. */
	if (dvmaddr == 0) {
		while ((error = iommu_dvma_valloc(dt, is, map,
			voffs + size)) == ENOMEM && !complete) {
			/*
			 * Free the DVMA allocations of a few maps until
			 * the required size is reached.  This is an
			 * approximation that avoids calling the allocation
			 * function too often; one free run will most likely
			 * not suffice unless a single map was large enough
			 * by itself, due to fragmentation.
			 */
			IS_LOCK(is);
			freed = 0;
			last = TAILQ_LAST(&is->is_maplruq, iommu_maplruq_head);
			do {
				tm = TAILQ_FIRST(&is->is_maplruq);
				complete = tm == last;
				if (tm == NULL)
					break;
				freed += iommu_dvma_vprune(is, tm);
				/* Move to the end. */
				iommu_map_insq(is, tm);
			} while (freed < size && !complete);
			IS_UNLOCK(is);
		}
		if (error != 0)
			return (error);
		dvmaddr = iommu_dvma_vfindseg(map, voffs, size, amask);
		KASSERT(dvmaddr != 0, ("%s: allocation failed unexpectedly!",
		    __func__));
	}
	*addr = dvmaddr;
	return (0);
}

static int
iommu_dvmamem_alloc(bus_dma_tag_t dt, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
	struct iommu_state *is = dt->dt_cookie;
	int error, mflags;

	/*
	 * XXX: This will break for 32 bit transfers on machines with more
	 * than is->is_pmaxaddr memory.
	 */
	if ((error = sparc64_dma_alloc_map(dt, mapp)) != 0)
		return (error);

	if ((flags & BUS_DMA_NOWAIT) != 0)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if ((flags & BUS_DMA_ZERO) != 0)
		mflags |= M_ZERO;

	if ((*vaddr = malloc(dt->dt_maxsize, M_IOMMU, mflags)) == NULL) {
		error = ENOMEM;
		sparc64_dma_free_map(dt, *mapp);
		return (error);
	}
	if ((flags & BUS_DMA_COHERENT) != 0)
		(*mapp)->dm_flags |= DMF_COHERENT;
	/*
	 * Try to preallocate DVMA space.  If this fails, it is retried at
	 * load time.
	 */
	iommu_dvma_valloc(dt, is, *mapp, IOMMU_SIZE_ROUNDUP(dt->dt_maxsize));
	IS_LOCK(is);
	iommu_map_insq(is, *mapp);
	IS_UNLOCK(is);
	return (0);
}

static void
iommu_dvmamem_free(bus_dma_tag_t dt, void *vaddr, bus_dmamap_t map)
{
	struct iommu_state *is = dt->dt_cookie;

	iommu_dvma_vfree(is, map);
	sparc64_dma_free_map(dt, map);
	free(vaddr, M_IOMMU);
}

static int
iommu_dvmamap_create(bus_dma_tag_t dt, int flags, bus_dmamap_t *mapp)
{
	struct iommu_state *is = dt->dt_cookie;
	bus_size_t totsz, presz, currsz;
	int error, i, maxpre;

	if ((error = sparc64_dma_alloc_map(dt, mapp)) != 0)
		return (error);
	if ((flags & BUS_DMA_COHERENT) != 0)
		(*mapp)->dm_flags |= DMF_COHERENT;
	/*
	 * Preallocate DVMA space; if this fails now, it is retried at load
	 * time.  Through bus_dmamap_load_mbuf() and bus_dmamap_load_uio(),
	 * it is possible to have multiple discontiguous segments in a single
	 * map, which is handled by allocating additional resources, instead
	 * of increasing the size, to avoid fragmentation.
	 * Clamp preallocation to IOMMU_MAX_PRE.  In some situations we can
	 * handle more; that case is handled by reallocating at map load time.
	 */
	totsz = ulmin(IOMMU_SIZE_ROUNDUP(dt->dt_maxsize), IOMMU_MAX_PRE);
	error = iommu_dvma_valloc(dt, is, *mapp, totsz);
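	/*
	 * A preallocation failure is not fatal (see above); the map is
	 * returned without DVMA space and the allocation is retried when
	 * the map is loaded.
	 */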
	if (error != 0)
		return (0);
	/*
	 * Try to be smart about preallocating some additional segments if
	 * needed.
	 */
	maxpre = imin(dt->dt_nsegments, IOMMU_MAX_PRE_SEG);
	presz = dt->dt_maxsize / maxpre;
	for (i = 1; i < maxpre && totsz < IOMMU_MAX_PRE; i++) {
		currsz = round_io_page(ulmin(presz, IOMMU_MAX_PRE - totsz));
		error = iommu_dvma_valloc(dt, is, *mapp, currsz);
		if (error != 0)
			break;
		totsz += currsz;
	}
	IS_LOCK(is);
	iommu_map_insq(is, *mapp);
	IS_UNLOCK(is);
	return (0);
}

static int
iommu_dvmamap_destroy(bus_dma_tag_t dt, bus_dmamap_t map)
{
	struct iommu_state *is = dt->dt_cookie;

	iommu_dvma_vfree(is, map);
	sparc64_dma_free_map(dt, map);
	return (0);
}

/*
 * Utility function to load a physical buffer.  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 */
static int
iommu_dvmamap_load_phys(bus_dma_tag_t dt, bus_dmamap_t map, vm_paddr_t buf,
    bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp)
{
	bus_addr_t amask, dvmaddr, dvmoffs;
	bus_size_t sgsize, esize;
	struct iommu_state *is;
	vm_offset_t voffs;
	vm_paddr_t curaddr;
	int error, firstpg, sgcnt;
	u_int slot;

	is = dt->dt_cookie;
	if (*segp == -1) {
		if ((map->dm_flags & DMF_LOADED) != 0) {
#ifdef DIAGNOSTIC
			printf("%s: map still in use\n", __func__);
#endif
			bus_dmamap_unload(dt, map);
		}

		/*
		 * Make sure that the map is not on a queue so that the
		 * resource list may be safely accessed and modified without
		 * needing the lock to cover the whole operation.
		 */
		IS_LOCK(is);
		iommu_map_remq(is, map);
		IS_UNLOCK(is);

		amask = dt->dt_alignment - 1;
	} else
		amask = 0;
	KASSERT(buflen != 0, ("%s: buflen == 0!", __func__));
	if (buflen > dt->dt_maxsize)
		return (EINVAL);

	if (segs == NULL)
		segs = dt->dt_segments;

	voffs = buf & IO_PAGE_MASK;

	/* Try to find a slab that is large enough. */
	error = iommu_dvma_vallocseg(dt, is, map, voffs, buflen, amask,
	    &dvmaddr);
	if (error != 0)
		return (error);

	sgcnt = *segp;
	firstpg = 1;
	map->dm_flags &= ~DMF_STREAMED;
	map->dm_flags |= iommu_use_streaming(is, map, buflen) != 0 ?
	    DMF_STREAMED : 0;
	for (; buflen > 0; ) {
		curaddr = buf;

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = IO_PAGE_SIZE - ((u_long)buf & IO_PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		buflen -= sgsize;
		buf += sgsize;

		dvmoffs = trunc_io_page(dvmaddr);
		iommu_enter(is, dvmoffs, trunc_io_page(curaddr),
		    (map->dm_flags & DMF_STREAMED) != 0, flags);
		if ((is->is_flags & IOMMU_FLUSH_CACHE) != 0) {
			slot = IOTSBSLOT(dvmoffs);
			if (buflen <= 0 || slot % 8 == 7)
				IOMMU_WRITE8(is, is_iommu, IMR_CACHE_FLUSH,
				    is->is_ptsb + slot * 8);
		}

		/*
		 * Chop the chunk up into segments of at most maxsegsz, but try
		 * to fill each segment as well as possible.
		 */
		if (!firstpg) {
			esize = ulmin(sgsize,
			    dt->dt_maxsegsz - segs[sgcnt].ds_len);
			segs[sgcnt].ds_len += esize;
			sgsize -= esize;
			dvmaddr += esize;
		}
		while (sgsize > 0) {
			sgcnt++;
			if (sgcnt >= dt->dt_nsegments)
				return (EFBIG);
			/*
			 * No extra alignment here - the common practice in
			 * the busdma code seems to be that only the first
			 * segment needs to satisfy the alignment constraints
			 * (and that only for bus_dmamem_alloc()ed maps).
			 * It is assumed that such tags have maxsegsize >=
			 * maxsize.
			 */
			esize = ulmin(sgsize, dt->dt_maxsegsz);
			segs[sgcnt].ds_addr = dvmaddr;
			segs[sgcnt].ds_len = esize;
			sgsize -= esize;
			dvmaddr += esize;
		}

		firstpg = 0;
	}
	*segp = sgcnt;
	return (0);
}

/*
 * IOMMU DVMA operations, common to PCI and SBus
 */
static int
iommu_dvmamap_load_buffer(bus_dma_tag_t dt, bus_dmamap_t map, void *buf,
    bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
    int *segp)
{
	bus_addr_t amask, dvmaddr, dvmoffs;
	bus_size_t sgsize, esize;
	struct iommu_state *is;
	vm_offset_t vaddr, voffs;
	vm_paddr_t curaddr;
	int error, firstpg, sgcnt;
	u_int slot;

	is = dt->dt_cookie;
	if (*segp == -1) {
		if ((map->dm_flags & DMF_LOADED) != 0) {
#ifdef DIAGNOSTIC
			printf("%s: map still in use\n", __func__);
#endif
			bus_dmamap_unload(dt, map);
		}

		/*
		 * Make sure that the map is not on a queue so that the
		 * resource list may be safely accessed and modified without
		 * needing the lock to cover the whole operation.
		 */
		IS_LOCK(is);
		iommu_map_remq(is, map);
		IS_UNLOCK(is);

		amask = dt->dt_alignment - 1;
	} else
		amask = 0;
	KASSERT(buflen != 0, ("%s: buflen == 0!", __func__));
	if (buflen > dt->dt_maxsize)
		return (EINVAL);

	if (segs == NULL)
		segs = dt->dt_segments;

	vaddr = (vm_offset_t)buf;
	voffs = vaddr & IO_PAGE_MASK;

	/* Try to find a slab that is large enough. */
	error = iommu_dvma_vallocseg(dt, is, map, voffs, buflen, amask,
	    &dvmaddr);
	if (error != 0)
		return (error);

	sgcnt = *segp;
	firstpg = 1;
	map->dm_flags &= ~DMF_STREAMED;
	map->dm_flags |= iommu_use_streaming(is, map, buflen) != 0 ?
	    DMF_STREAMED : 0;
	for (; buflen > 0; ) {
		/*
		 * Get the physical address for this page.
		 */
		if (pmap == kernel_pmap)
			curaddr = pmap_kextract(vaddr);
		else
			curaddr = pmap_extract(pmap, vaddr);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = IO_PAGE_SIZE - ((u_long)vaddr & IO_PAGE_MASK);
		if (buflen < sgsize)
			sgsize = buflen;

		buflen -= sgsize;
		vaddr += sgsize;

		dvmoffs = trunc_io_page(dvmaddr);
		iommu_enter(is, dvmoffs, trunc_io_page(curaddr),
		    (map->dm_flags & DMF_STREAMED) != 0, flags);
		if ((is->is_flags & IOMMU_FLUSH_CACHE) != 0) {
			slot = IOTSBSLOT(dvmoffs);
			if (buflen <= 0 || slot % 8 == 7)
				IOMMU_WRITE8(is, is_iommu, IMR_CACHE_FLUSH,
				    is->is_ptsb + slot * 8);
		}

		/*
		 * Chop the chunk up into segments of at most maxsegsz, but try
		 * to fill each segment as well as possible.
		 */
		if (!firstpg) {
			esize = ulmin(sgsize,
			    dt->dt_maxsegsz - segs[sgcnt].ds_len);
			segs[sgcnt].ds_len += esize;
			sgsize -= esize;
			dvmaddr += esize;
		}
		while (sgsize > 0) {
			sgcnt++;
			if (sgcnt >= dt->dt_nsegments)
				return (EFBIG);
			/*
			 * No extra alignment here - the common practice in
			 * the busdma code seems to be that only the first
			 * segment needs to satisfy the alignment constraints
			 * (and that only for bus_dmamem_alloc()ed maps).
			 * It is assumed that such tags have maxsegsize >=
			 * maxsize.
			 */
			esize = ulmin(sgsize, dt->dt_maxsegsz);
			segs[sgcnt].ds_addr = dvmaddr;
			segs[sgcnt].ds_len = esize;
			sgsize -= esize;
			dvmaddr += esize;
		}

		firstpg = 0;
	}
	*segp = sgcnt;
	return (0);
}

static void
iommu_dvmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
    struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
{
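	/*
	 * Deferred DVMA allocation is not implemented (see the TODO list at
	 * the top of this file), so there is nothing to record here.
	 */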
}

static bus_dma_segment_t *
iommu_dvmamap_complete(bus_dma_tag_t dt, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, int error)
{
	struct iommu_state *is = dt->dt_cookie;

	IS_LOCK(is);
	iommu_map_insq(is, map);
	if (error != 0) {
		iommu_dvmamap_vunload(is, map);
		IS_UNLOCK(is);
	} else {
		IS_UNLOCK(is);
		map->dm_flags |= DMF_LOADED;
	}
	if (segs == NULL)
		segs = dt->dt_segments;
	return (segs);
}

static void
iommu_dvmamap_unload(bus_dma_tag_t dt, bus_dmamap_t map)
{
	struct iommu_state *is = dt->dt_cookie;

	if ((map->dm_flags & DMF_LOADED) == 0)
		return;
	IS_LOCK(is);
	iommu_dvmamap_vunload(is, map);
	iommu_map_insq(is, map);
	IS_UNLOCK(is);
	map->dm_flags &= ~DMF_LOADED;
}

static void
iommu_dvmamap_sync(bus_dma_tag_t dt, bus_dmamap_t map, bus_dmasync_op_t op)
{
	struct iommu_state *is = dt->dt_cookie;
	struct bus_dmamap_res *r;
	vm_offset_t va;
	vm_size_t len;
	int streamed = 0;

	if ((map->dm_flags & DMF_LOADED) == 0)
		return;
	if ((map->dm_flags & DMF_STREAMED) != 0 &&
	    ((op & BUS_DMASYNC_POSTREAD) != 0 ||
	    (op & BUS_DMASYNC_PREWRITE) != 0)) {
		IS_LOCK(is);
		SLIST_FOREACH(r, &map->dm_reslist, dr_link) {
			va = (vm_offset_t)BDR_START(r);
			len = r->dr_used;
			/*
			 * If we have a streaming buffer, flush it here
			 * first.
			 */
			while (len > 0) {
				if ((IOMMU_GET_TTE(is, va) &
				    IOTTE_STREAM) != 0) {
					streamed = 1;
					iommu_strbuf_flush(is, va);
				}
				len -= ulmin(len, IO_PAGE_SIZE);
				va += IO_PAGE_SIZE;
			}
		}
		if (streamed)
			iommu_strbuf_sync(is);
		IS_UNLOCK(is);
	}
	if ((op & BUS_DMASYNC_PREWRITE) != 0)
		membar(Sync);
}

#ifdef IOMMU_DIAG

/*
 * Perform an IOMMU diagnostic access and print the tag belonging to va.
 */
static void
iommu_diag(struct iommu_state *is, vm_offset_t va)
{
	int i;
	uint64_t data, tag;

	if ((is->is_flags & IOMMU_FIRE) != 0)
		return;
	IS_LOCK_ASSERT(is);
	IOMMU_WRITE8(is, is_dva, 0, trunc_io_page(va));
	membar(StoreStore | StoreLoad);
	printf("%s: tte entry %#lx", __func__, IOMMU_GET_TTE(is, va));
	if (is->is_dtcmp != 0) {
		printf(", tag compare register is %#lx\n",
		    IOMMU_READ8(is, is_dtcmp, 0));
	} else
		printf("\n");
	for (i = 0; i < 16; i++) {
		tag = IOMMU_READ8(is, is_dtag, i * 8);
		data = IOMMU_READ8(is, is_ddram, i * 8);
		printf("%s: tag %d: %#lx, vpn %#lx, err %lx; "
		    "data %#lx, pa %#lx, v %d, c %d\n", __func__, i,
		    tag, (tag & IOMMU_DTAG_VPNMASK) << IOMMU_DTAG_VPNSHIFT,
		    (tag & IOMMU_DTAG_ERRMASK) >> IOMMU_DTAG_ERRSHIFT, data,
		    (data & IOMMU_DDATA_PGMASK) << IOMMU_DDATA_PGSHIFT,
		    (data & IOMMU_DDATA_V) != 0, (data & IOMMU_DDATA_C) != 0);
	}
}

#endif /* IOMMU_DIAG */

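/*
 * The PCI and SBus host bridge drivers point the bus DMA tags they create at
 * this method table, with dt_cookie set to their struct iommu_state, so the
 * standard bus_dma(9) operations on child devices end up in the functions
 * above.
 */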
struct bus_dma_methods iommu_dma_methods = {
	iommu_dvmamap_create,
	iommu_dvmamap_destroy,
	iommu_dvmamap_load_phys,
	iommu_dvmamap_load_buffer,
	iommu_dvmamap_waitok,
	iommu_dvmamap_complete,
	iommu_dvmamap_unload,
	iommu_dvmamap_sync,
	iommu_dvmamem_alloc,
	iommu_dvmamem_free,
};