/*	$NetBSD: gapspci_dma.c,v 1.21 2023/12/02 22:42:02 thorpej Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Bus DMA implementation for the SEGA GAPS PCI bridge.
 *
 * NOTE: We only implement a small subset of what the bus_dma(9)
 * API specifies.  Right now, the GAPS PCI bridge is only used for
 * the Dreamcast Broadband Adapter, so we only provide what the
 * pci(4) and rtk(4) drivers need.
 */

#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
__KERNEL_RCSID(0, "$NetBSD: gapspci_dma.c,v 1.21 2023/12/02 22:42:02 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/vmem.h>
#include <sys/malloc.h>
#include <sys/bus.h>

#include <machine/cpu.h>

#include <dev/pci/pcivar.h>

#include <dreamcast/dev/g2/gapspcivar.h>

#include <uvm/uvm.h>

int	gaps_dmamap_create(bus_dma_tag_t, bus_size_t, int, bus_size_t,
	    bus_size_t, int, bus_dmamap_t *);
void	gaps_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
int	gaps_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *, bus_size_t,
	    struct proc *, int);
int	gaps_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t, struct mbuf *, int);
int	gaps_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t, struct uio *, int);
int	gaps_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t, bus_dma_segment_t *,
	    int, bus_size_t, int);
void	gaps_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
void	gaps_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
	    bus_size_t, int);

int	gaps_dmamem_alloc(bus_dma_tag_t tag, bus_size_t size,
	    bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs,
	    int nsegs, int *rsegs, int flags);
void	gaps_dmamem_free(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs);
int	gaps_dmamem_map(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs,
	    size_t size, void **kvap, int flags);
void	gaps_dmamem_unmap(bus_dma_tag_t tag, void *kva, size_t size);
paddr_t	gaps_dmamem_mmap(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs,
	    off_t off, int prot, int flags);

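/*
 * gaps_dma_init:
 *
 *	Initialize the DMA tag hung off the GAPS softc and create the
 *	vmem arena that parcels out the bridge's SRAM bounce buffer.
 *	pci(4) hands this tag to child devices, which then go through
 *	the standard bus_dma(9) entry points.  A typical transmit-side
 *	sequence would be roughly (illustrative only, not a literal
 *	transcript of the rtk(4) driver):
 *
 *		bus_dmamap_create(t, size, 1, size, 0, BUS_DMA_NOWAIT, &map);
 *		bus_dmamap_load_mbuf(t, map, m, BUS_DMA_NOWAIT);
 *		bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_PREWRITE);
 *		... start DMA, wait for completion ...
 *		bus_dmamap_sync(t, map, 0, len, BUS_DMASYNC_POSTWRITE);
 *		bus_dmamap_unload(t, map);
 */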
void
gaps_dma_init(struct gaps_softc *sc)
{
	bus_dma_tag_t t = &sc->sc_dmat;

	memset(t, 0, sizeof(*t));

	t->_cookie = sc;
	t->_dmamap_create = gaps_dmamap_create;
	t->_dmamap_destroy = gaps_dmamap_destroy;
	t->_dmamap_load = gaps_dmamap_load;
	t->_dmamap_load_mbuf = gaps_dmamap_load_mbuf;
	t->_dmamap_load_uio = gaps_dmamap_load_uio;
	t->_dmamap_load_raw = gaps_dmamap_load_raw;
	t->_dmamap_unload = gaps_dmamap_unload;
	t->_dmamap_sync = gaps_dmamap_sync;

	t->_dmamem_alloc = gaps_dmamem_alloc;
	t->_dmamem_free = gaps_dmamem_free;
	t->_dmamem_map = gaps_dmamem_map;
	t->_dmamem_unmap = gaps_dmamem_unmap;
	t->_dmamem_mmap = gaps_dmamem_mmap;

	/*
	 * The GAPS PCI bridge has 32k of DMA memory.  We manage it
	 * with a vmem arena.
	 */
	sc->sc_dma_arena = vmem_create("gaps dma",
				       sc->sc_dmabase,
				       sc->sc_dmasize,
				       1024 /* XXX */,	/* quantum */
				       NULL,		/* allocfn */
				       NULL,		/* freefn */
				       NULL,		/* arg */
				       0,		/* qcache_max */
				       VM_SLEEP,
				       IPL_VM);

	if (bus_space_map(sc->sc_memt, sc->sc_dmabase, sc->sc_dmasize,
	    0, &sc->sc_dma_memh) != 0)
		panic("gaps_dma_init: can't map SRAM buffer");
}

/*
 * A GAPS DMA map -- has the standard DMA map, plus some extra
 * housekeeping data.
 */
struct gaps_dmamap {
	struct dreamcast_bus_dmamap gd_dmamap;
	void *gd_origbuf;
	int gd_buftype;
};

#define	GAPS_DMA_BUFTYPE_INVALID	0
#define	GAPS_DMA_BUFTYPE_LINEAR		1
#define	GAPS_DMA_BUFTYPE_MBUF		2

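/*
 * gaps_dmamap_create:
 *
 *	Create a DMA map.  Only a single segment is supported; with
 *	BUS_DMA_ALLOCNOW the SRAM bounce space is reserved up front
 *	and held for the lifetime of the map.
 */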
int
gaps_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamap)
{
	struct gaps_softc *sc = t->_cookie;
	struct gaps_dmamap *gmap;
	bus_dmamap_t map;

	/*
	 * Allocate and initialize the DMA map.  The end of the map is
	 * a variable-sized array of segments, so we allocate enough
	 * room for them in one shot.  Since the DMA map always includes
	 * one segment, and we only support one segment, this is really
	 * easy.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation
	 * of ALLOCNOW notifies others that we've reserved these resources
	 * and they are not to be freed.
	 */

	gmap = malloc(sizeof(*gmap), M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK);
	if (gmap == NULL)
		return ENOMEM;

	memset(gmap, 0, sizeof(*gmap));

	gmap->gd_buftype = GAPS_DMA_BUFTYPE_INVALID;

	map = &gmap->gd_dmamap;

	map->_dm_size = size;
	map->_dm_segcnt = 1;
	map->_dm_maxmaxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_maxsegsz = maxsegsz;

	if (flags & BUS_DMA_ALLOCNOW) {
		vmem_addr_t res;
		int error;

		const vm_flag_t vmflags = VM_BESTFIT |
		    ((flags & BUS_DMA_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);

		error = vmem_xalloc(sc->sc_dma_arena, size,
				    0,			/* alignment */
				    0,			/* phase */
				    0,			/* nocross */
				    VMEM_ADDR_MIN,	/* minaddr */
				    VMEM_ADDR_MAX,	/* maxaddr */
				    vmflags,
				    &res);
		if (error) {
			/* Free with the same type the map was allocated with. */
			free(gmap, M_DMAMAP);
			return error;
		}

		map->dm_segs[0].ds_addr = res;
		map->dm_segs[0].ds_len = size;

		map->dm_mapsize = size;
		map->dm_nsegs = 1;
	} else {
		map->dm_mapsize = 0;		/* no valid mappings */
		map->dm_nsegs = 0;
	}

	*dmamap = map;

	return 0;
}

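/*
 * gaps_dmamap_destroy:
 *
 *	Destroy a DMA map, releasing any SRAM reserved via
 *	BUS_DMA_ALLOCNOW.
 */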
void
gaps_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct gaps_softc *sc = t->_cookie;

	if (map->_dm_flags & BUS_DMA_ALLOCNOW) {
		vmem_xfree(sc->sc_dma_arena, map->dm_segs[0].ds_addr,
		    map->dm_mapsize);
	}
	free(map, M_DMAMAP);
}

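/*
 * gaps_dmamap_load:
 *
 *	Load a linear kernel buffer.  This only reserves space in the
 *	bridge's SRAM and remembers the original buffer; the data is
 *	actually copied in gaps_dmamap_sync().
 */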
int
gaps_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *addr,
    bus_size_t size, struct proc *p, int flags)
{
	struct gaps_softc *sc = t->_cookie;
	struct gaps_dmamap *gmap = (void *) map;
	vmem_addr_t res;
	int error;

	if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0) {
		/*
		 * Make sure that on error condition we return
		 * "no valid mappings".
		 */
		map->dm_mapsize = 0;
		map->dm_nsegs = 0;
		KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
	}

	/* XXX Don't support DMA to process space right now. */
	if (p != NULL)
		return EINVAL;

	if (size > map->_dm_size)
		return EINVAL;

	const vm_flag_t vmflags = VM_BESTFIT |
	    ((flags & BUS_DMA_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);

	error = vmem_xalloc(sc->sc_dma_arena, size,
			    0,			/* alignment */
			    0,			/* phase */
			    map->_dm_boundary,	/* nocross */
			    VMEM_ADDR_MIN,	/* minaddr */
			    VMEM_ADDR_MAX,	/* maxaddr */
			    vmflags,
			    &res);
	if (error)
		return error;

	map->dm_segs[0].ds_addr = res;
	map->dm_segs[0].ds_len = size;

	gmap->gd_origbuf = addr;
	gmap->gd_buftype = GAPS_DMA_BUFTYPE_LINEAR;

	map->dm_mapsize = size;
	map->dm_nsegs = 1;

	return 0;
}

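/*
 * gaps_dmamap_load_mbuf:
 *
 *	Load an mbuf chain.  As with gaps_dmamap_load(), only SRAM is
 *	reserved here; the chain is walked and copied at sync time.
 */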
int
gaps_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	struct gaps_softc *sc = t->_cookie;
	struct gaps_dmamap *gmap = (void *) map;
	vmem_addr_t res;
	int error;

	if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0) {
		/*
		 * Make sure that on error condition we return
		 * "no valid mappings".
		 */
		map->dm_mapsize = 0;
		map->dm_nsegs = 0;
		KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
	}

#ifdef DIAGNOSTIC
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("gaps_dmamap_load_mbuf: no packet header");
#endif

	if (m0->m_pkthdr.len > map->_dm_size)
		return EINVAL;

	const vm_flag_t vmflags = VM_BESTFIT |
	    ((flags & BUS_DMA_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);

	error = vmem_xalloc(sc->sc_dma_arena, m0->m_pkthdr.len,
			    0,			/* alignment */
			    0,			/* phase */
			    map->_dm_boundary,	/* nocross */
			    VMEM_ADDR_MIN,	/* minaddr */
			    VMEM_ADDR_MAX,	/* maxaddr */
			    vmflags,
			    &res);
	if (error)
		return error;

	map->dm_segs[0].ds_addr = res;
	map->dm_segs[0].ds_len = m0->m_pkthdr.len;

	gmap->gd_origbuf = m0;
	gmap->gd_buftype = GAPS_DMA_BUFTYPE_MBUF;

	map->dm_mapsize = m0->m_pkthdr.len;
	map->dm_nsegs = 1;

	return 0;
}

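/*
 * gaps_dmamap_load_uio, gaps_dmamap_load_raw:
 *
 *	Not implemented; pci(4) and rtk(4) don't need them.
 */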
int
gaps_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
    int flags)
{

	printf("gaps_dmamap_load_uio: not implemented\n");
	return EINVAL;
}

int
gaps_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	printf("gaps_dmamap_load_raw: not implemented\n");
	return EINVAL;
}

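/*
 * gaps_dmamap_unload:
 *
 *	Unload a DMA map, returning its SRAM to the arena unless the
 *	space was reserved with BUS_DMA_ALLOCNOW.
 */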
void
gaps_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct gaps_softc *sc = t->_cookie;
	struct gaps_dmamap *gmap = (void *) map;

	if (gmap->gd_buftype == GAPS_DMA_BUFTYPE_INVALID) {
		printf("gaps_dmamap_unload: DMA map not loaded!\n");
		return;
	}

	if ((map->_dm_flags & BUS_DMA_ALLOCNOW) == 0) {
		vmem_xfree(sc->sc_dma_arena, map->dm_segs[0].ds_addr,
		    map->dm_mapsize);

		map->dm_maxsegsz = map->_dm_maxmaxsegsz;
		map->dm_mapsize = 0;
		map->dm_nsegs = 0;
	}

	gmap->gd_buftype = GAPS_DMA_BUFTYPE_INVALID;
}

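/*
 * gaps_dmamap_sync:
 *
 *	Synchronize the DMA map.  Since all DMA bounces through the
 *	bridge's SRAM, this is where the data actually moves:
 *	PREWRITE copies the caller's buffer (or mbuf chain) into SRAM,
 *	POSTREAD copies it back out.  PREREAD and POSTWRITE are no-ops.
 */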
void
gaps_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
    bus_size_t len, int ops)
{
	struct gaps_softc *sc = t->_cookie;
	struct gaps_dmamap *gmap = (void *) map;
	bus_addr_t dmaoff = map->dm_segs[0].ds_addr - sc->sc_dmabase;

	/*
	 * Mixing PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("gaps_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if ((ops & (BUS_DMASYNC_PREWRITE|BUS_DMASYNC_POSTREAD)) != 0) {
		if (offset >= map->dm_mapsize) {
			printf("offset 0x%lx mapsize 0x%lx\n",
			    offset, map->dm_mapsize);
			panic("gaps_dmamap_sync: bad offset");
		}
		if (len == 0 || (offset + len) > map->dm_mapsize) {
			printf("len 0x%lx offset 0x%lx mapsize 0x%lx\n",
			    len, offset, map->dm_mapsize);
			panic("gaps_dmamap_sync: bad length");
		}
	}
#endif

	switch (gmap->gd_buftype) {
	case GAPS_DMA_BUFTYPE_INVALID:
		printf("gaps_dmamap_sync: DMA map is not loaded!\n");
		return;

	case GAPS_DMA_BUFTYPE_LINEAR:
		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer to the SRAM buffer.
			 */
			bus_space_write_region_1(sc->sc_memt,
			    sc->sc_dma_memh,
			    dmaoff + offset,
			    (uint8_t *)gmap->gd_origbuf + offset, len);
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the SRAM buffer to the caller's buffer.
			 */
			bus_space_read_region_1(sc->sc_memt,
			    sc->sc_dma_memh,
			    dmaoff + offset,
			    (uint8_t *)gmap->gd_origbuf + offset, len);
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;

	case GAPS_DMA_BUFTYPE_MBUF:
	    {
		struct mbuf *m, *m0 = gmap->gd_origbuf;
		bus_size_t minlen, moff;

		/*
		 * Nothing to do for pre-read.
		 */

		if (ops & BUS_DMASYNC_PREWRITE) {
			/*
			 * Copy the caller's buffer into the SRAM buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				bus_space_write_region_1(sc->sc_memt,
				    sc->sc_dma_memh, dmaoff + offset,
				    mtod(m, uint8_t *) + moff, minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		if (ops & BUS_DMASYNC_POSTREAD) {
			/*
			 * Copy the SRAM buffer into the caller's buffer.
			 */
			for (moff = offset, m = m0; m != NULL && len != 0;
			     m = m->m_next) {
				/* Find the beginning mbuf. */
				if (moff >= m->m_len) {
					moff -= m->m_len;
					continue;
				}

				/*
				 * Now at the first mbuf to sync; nail
				 * each one until we have exhausted the
				 * length.
				 */
				minlen = len < m->m_len - moff ?
				    len : m->m_len - moff;

				bus_space_read_region_1(sc->sc_memt,
				    sc->sc_dma_memh, dmaoff + offset,
				    mtod(m, uint8_t *) + moff, minlen);

				moff = 0;
				len -= minlen;
				offset += minlen;
			}
		}

		/*
		 * Nothing to do for post-write.
		 */
		break;
	    }

	default:
		printf("unknown buffer type %d\n", gmap->gd_buftype);
		panic("gaps_dmamap_sync");
	}
}

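/*
 * gaps_dmamem_alloc:
 *
 *	Allocate DMA-safe memory.  Note this allocates ordinary host
 *	RAM from UVM, not the bridge's SRAM; it exists to satisfy the
 *	bus_dmamem_*() side of the API.
 */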
int
gaps_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	extern paddr_t avail_start, avail_end;	/* from pmap.c */

	struct pglist mlist;
	paddr_t curaddr, lastaddr;
	struct vm_page *m;
	int curseg, error;

	/* Always round the size. */
	size = round_page(size);

	/*
	 * Allocate the pages from the VM system.
	 */
	error = uvm_pglistalloc(size, avail_start, avail_end - PAGE_SIZE,
	    alignment, boundary, &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
	if (error)
		return error;

	/*
	 * Compute the location, size, and number of segments actually
	 * returned by the VM code.
	 */
	m = mlist.tqh_first;
	curseg = 0;
	lastaddr = segs[curseg].ds_addr = VM_PAGE_TO_PHYS(m);
	segs[curseg].ds_len = PAGE_SIZE;
	m = TAILQ_NEXT(m, pageq.queue);

	for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) {
		curaddr = VM_PAGE_TO_PHYS(m);
		if (curaddr == (lastaddr + PAGE_SIZE))
			segs[curseg].ds_len += PAGE_SIZE;
		else {
			curseg++;
			segs[curseg].ds_addr = curaddr;
			segs[curseg].ds_len = PAGE_SIZE;
		}
		lastaddr = curaddr;
	}

	*rsegs = curseg + 1;

	return 0;
}

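/*
 * gaps_dmamem_free:
 *
 *	Free memory previously allocated with gaps_dmamem_alloc().
 */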
void
gaps_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct pglist mlist;
	struct vm_page *m;
	bus_addr_t addr;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		     addr < segs[curseg].ds_addr + segs[curseg].ds_len;
		     addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
		}
	}

	uvm_pglistfree(&mlist);
}

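/*
 * gaps_dmamem_map:
 *
 *	Map allocated memory into kernel virtual address space.  The
 *	single-segment case uses the SH P2 segment (untranslated, so
 *	no TLB entries are consumed); multi-segment allocations are
 *	given wired kernel VA instead.
 */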
int
gaps_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	/*
	 * If we're only mapping 1 segment, use P2SEG, to avoid
	 * TLB thrashing.
	 */
	if (nsegs == 1) {
		*kvap = (void *)SH3_PHYS_TO_P2SEG(segs[0].ds_addr);
		return 0;
	}

	size = round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);

	if (va == 0)
		return ENOMEM;

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		     addr < segs[curseg].ds_addr + segs[curseg].ds_len;
		     addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("gaps_dmamem_map: size botch");
			pmap_kenter_pa(va, addr,
			    VM_PROT_READ | VM_PROT_WRITE, 0);
		}
	}
	pmap_update(pmap_kernel());

	return 0;
}

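/*
 * gaps_dmamem_unmap:
 *
 *	Unmap memory mapped with gaps_dmamem_map().  P2SEG addresses
 *	were never entered into the pmap, so they need no teardown.
 */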
void
gaps_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

#ifdef DIAGNOSTIC
	if ((u_long) kva & PAGE_MASK)
		panic("gaps_dmamem_unmap");
#endif

	/*
	 * Nothing to do if we mapped it with P2SEG.
	 */
	if (kva >= (void *)SH3_P2SEG_BASE &&
	    kva <= (void *)SH3_P2SEG_END)
		return;

	size = round_page(size);
	pmap_kremove((vaddr_t) kva, size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t) kva, size, UVM_KMF_VAONLY);
}

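/*
 * gaps_dmamem_mmap:
 *
 *	Not implemented; mmap(2) of DMA memory is not supported here.
 */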
paddr_t
gaps_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    off_t off, int prot, int flags)
{

	/* Not implemented. */
	return -1;
}