/*	$NetBSD: dvma.c,v 1.37 2012/01/27 18:53:03 para Exp $	*/

/*-
 * Copyright (c) 1996 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Gordon W. Ross.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dvma.c,v 1.37 2012/01/27 18:53:03 para Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/extent.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/core.h>
#include <sys/exec.h>

#include <uvm/uvm.h> /* XXX: not _extern ... need uvm_map_setup */

#define _SUN68K_BUS_DMA_PRIVATE
#include <machine/autoconf.h>
#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/dvma.h>
#include <machine/pmap.h>
#include <machine/pte.h>

#include <sun3/sun3/control.h>
#include <sun3/sun3/machdep.h>

/* DVMA is the last 1MB, but the PROM owns the last page. */
#define DVMA_MAP_END	(DVMA_MAP_BASE + DVMA_MAP_AVAIL)

/* Extent map used by dvma_mapin/dvma_mapout */
struct extent *dvma_extent;

/* XXX: Might need to tune this... */
vsize_t dvma_segmap_size = 6 * NBSG;
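
/*
 * (NBSG is the hardware segment size, so this sets aside six
 * segments for the dvma_mapin() remap pool; dvma_init() below
 * leaves the rest of the DVMA window to phys_map as the
 * scratch-page pool.)
 */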

/* Using phys_map to manage DVMA scratch-memory pages. */
/* Note: Could use separate pagemap for obio if needed. */

void
dvma_init(void)
{
	vaddr_t segmap_addr;

	/*
	 * Create phys_map covering the entire DVMA space,
	 * then allocate the segment pool from that.  The
	 * remainder will be used as the DVMA page pool.
	 *
	 * Note that phys_map need not be marked interrupt-safe:
	 * allocations done from interrupt context go through
	 * dvma_extent, not through this map.
	 */
	phys_map = kmem_alloc(sizeof(struct vm_map), KM_SLEEP);
	if (phys_map == NULL)
		panic("unable to create DVMA map");

	uvm_map_setup(phys_map, DVMA_MAP_BASE, DVMA_MAP_END, 0);
	phys_map->pmap = pmap_kernel();

	/*
	 * Reserve the DVMA space used for segment remapping.
	 * The remainder of phys_map is used for DVMA scratch
	 * memory pages (i.e. driver control blocks, etc.)
	 */
	segmap_addr = uvm_km_alloc(phys_map, dvma_segmap_size, 0,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	if (segmap_addr != DVMA_MAP_BASE)
		panic("dvma_init: unable to allocate DVMA segments");

	/*
	 * Create the VM pool used for mapping whole segments
	 * into DVMA space for the purpose of data transfer.
	 */
	dvma_extent = extent_create("dvma", segmap_addr,
	    segmap_addr + (dvma_segmap_size - 1),
	    NULL, 0, EX_NOCOALESCE|EX_NOWAIT);
}
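
/*
 * The resulting DVMA layout (a recap of the code above):
 *
 *	DVMA_MAP_BASE                              DVMA_MAP_END
 *	|<--- dvma_segmap_size --->|<------ page pool ------>|
 *	  dvma_mapin()/dvma_mapout()   dvma_malloc()/dvma_free()
 *	  carved from dvma_extent      carved from phys_map
 */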

/*
 * Allocate actual memory pages in DVMA space.
 * (idea for implementation borrowed from Chris Torek.)
 */
void *
dvma_malloc(size_t bytes)
{
	void *new_mem;
	vsize_t new_size;

	if (bytes == 0)
		return NULL;
	new_size = m68k_round_page(bytes);
	new_mem = (void *)uvm_km_alloc(phys_map, new_size, 0, UVM_KMF_WIRED);
	if (new_mem == NULL)
		panic("dvma_malloc: no space in phys_map");
	/* The pmap code always makes DVMA pages non-cached. */
	return new_mem;
}

/*
 * Free pages from dvma_malloc()
 */
void
dvma_free(void *addr, size_t size)
{
	vsize_t sz = m68k_round_page(size);

	uvm_km_free(phys_map, (vaddr_t)addr, sz, UVM_KMF_WIRED);
}
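
/*
 * Example usage (a sketch only; the control-block structure and
 * its name are hypothetical):
 *
 *	struct xx_ctlblk *cb;
 *
 *	cb = dvma_malloc(sizeof(*cb));
 *	... use cb as a device-visible control block (non-cached) ...
 *	dvma_free(cb, sizeof(*cb));
 */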

/*
 * Given a DVMA address, return the physical address that
 * would be used by some OTHER bus-master besides the CPU.
 * (Examples: on-board ie/le, VME xy board).
 */
u_long
dvma_kvtopa(void *kva, int bustype)
{
	u_long addr, mask;

	addr = (u_long)kva;
	if ((addr & DVMA_MAP_BASE) != DVMA_MAP_BASE)
		panic("dvma_kvtopa: bad dvma addr=0x%lx", addr);

	switch (bustype) {
	case BUS_OBIO:
	case BUS_OBMEM:
		mask = DVMA_OBIO_SLAVE_MASK;
		break;
	default:	/* VME bus device. */
		mask = DVMA_VME_SLAVE_MASK;
		break;
	}

	return addr & mask;
}
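
/*
 * Example usage (a sketch only, continuing the dvma_malloc()
 * sketch above): the device must be given the slave address of
 * the control block, not its CPU virtual address:
 *
 *	u_long busaddr;
 *
 *	busaddr = dvma_kvtopa(cb, BUS_OBIO);
 *	... write busaddr into the device's DMA address register ...
 */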

/*
 * Given a range of kernel virtual space, remap all the
 * pages found there into the DVMA space (dup mappings).
 * This IS safe to call at interrupt time.
 * (Typically called at SPLBIO)
 */
void *
dvma_mapin(void *kva, int len, int canwait /* ignored */)
{
	vaddr_t seg_kva, seg_dma;
	vsize_t seg_len, seg_off;
	vaddr_t v, x;
	int s, sme, error;

	/* Get seg-aligned address and length. */
	seg_kva = (vaddr_t)kva;
	seg_len = (vsize_t)len;
	seg_off = seg_kva & SEGOFSET;
	seg_kva -= seg_off;
	seg_len = sun3_round_seg(seg_len + seg_off);

	s = splvm();

	/* Allocate the DVMA segment(s) */

	error = extent_alloc(dvma_extent, seg_len, NBSG, 0,
	    EX_FAST | EX_NOWAIT | EX_MALLOCOK, &seg_dma);
	if (error) {
		splx(s);
		return NULL;
	}

#ifdef	DIAGNOSTIC
	if (seg_dma & SEGOFSET)
		panic("dvma_mapin: seg not aligned");
#endif

	/* Duplicate the mappings into DMA space. */
	v = seg_kva;
	x = seg_dma;
	while (seg_len > 0) {
		sme = get_segmap(v);
#ifdef	DIAGNOSTIC
		if (sme == SEGINV)
			panic("dvma_mapin: seg not mapped");
#endif
#ifdef	HAVECACHE
		/* flush write-back on old mappings */
		if (cache_size)
			cache_flush_segment(v);
#endif
		set_segmap_allctx(x, sme);
		v += NBSG;
		x += NBSG;
		seg_len -= NBSG;
	}
	seg_dma += seg_off;

	splx(s);
	return (void *)seg_dma;
}

/*
 * Free some DVMA space allocated by the above.
 * This IS safe to call at interrupt time.
 * (Typically called at SPLBIO)
 */
void
dvma_mapout(void *dma, int len)
{
	vaddr_t seg_dma;
	vsize_t seg_len, seg_off;
	vaddr_t v, x;
	int sme;
	int s;

	/* Get seg-aligned address and length. */
	seg_dma = (vaddr_t)dma;
	seg_len = (vsize_t)len;
	seg_off = seg_dma & SEGOFSET;
	seg_dma -= seg_off;
	seg_len = sun3_round_seg(seg_len + seg_off);

	s = splvm();

	/* Flush cache and remove DVMA mappings. */
	v = seg_dma;
	x = v + seg_len;
	while (v < x) {
		sme = get_segmap(v);
#ifdef	DIAGNOSTIC
		if (sme == SEGINV)
			panic("dvma_mapout: seg not mapped");
#endif
#ifdef	HAVECACHE
		/* flush write-back on the DVMA mappings */
		if (cache_size)
			cache_flush_segment(v);
#endif
		set_segmap_allctx(v, SEGINV);
		v += NBSG;
	}

	if (extent_free(dvma_extent, seg_dma, seg_len,
	    EX_NOWAIT | EX_MALLOCOK))
		panic("dvma_mapout: unable to free 0x%lx,0x%lx",
		    seg_dma, seg_len);
	splx(s);
}
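
/*
 * Example usage (a sketch only; buf/len stand for a driver's
 * transfer buffer, and error handling is elided):
 *
 *	void *dva;
 *	int s;
 *
 *	s = splbio();
 *	dva = dvma_mapin(buf, len, 0);
 *	if (dva == NULL)
 *		... fail or defer the transfer ...
 *	... start the DMA at dvma_kvtopa(dva, bustype) ...
 *	... when the transfer completes ...
 *	dvma_mapout(dva, len);
 *	splx(s);
 */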

int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
    int nsegs, bus_size_t size, int flags)
{

	panic("_bus_dmamap_load_raw(): not implemented yet.");
}

int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	vaddr_t kva, dva;
	vsize_t off, sgsize;
	paddr_t pa;
	pmap_t pmap;
	int error, rv, s;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;

	if (buflen > map->_dm_size)
		return EINVAL;

	kva = (vaddr_t)buf;
	off = kva & PGOFSET;
	sgsize = round_page(off + buflen);

	/* Try to allocate DVMA space. */
	s = splvm();
	error = extent_alloc(dvma_extent, sgsize, PAGE_SIZE, 0,
	    EX_FAST | ((flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT),
	    &dva);
	splx(s);
	if (error)
		return ENOMEM;

	/* Fill in the segment. */
	map->dm_segs[0].ds_addr = dva + off;
	map->dm_segs[0].ds_len = buflen;
	map->dm_segs[0]._ds_va = dva;
	map->dm_segs[0]._ds_sgsize = sgsize;

	/*
	 * Now map the DVMA addresses we allocated to point to the
	 * pages of the caller's buffer.
	 */
	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	while (sgsize > 0) {
		rv = pmap_extract(pmap, kva, &pa);
#ifdef DIAGNOSTIC
		if (rv == false)
			panic("%s: unmapped VA", __func__);
#endif
		pmap_enter(pmap_kernel(), dva, pa | PMAP_NC,
		    VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
		kva += PAGE_SIZE;
		dva += PAGE_SIZE;
		sgsize -= PAGE_SIZE;
	}

	map->dm_nsegs = 1;
	map->dm_mapsize = map->dm_segs[0].ds_len;

	return 0;
}
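
/*
 * From a driver's point of view the above is reached through the
 * usual bus_dma(9) sequence (a sketch only; the sizes and flags
 * are illustrative):
 *
 *	bus_dmamap_t map;
 *
 *	bus_dmamap_create(t, MAXPHYS, 1, MAXPHYS, 0,
 *	    BUS_DMA_NOWAIT, &map);
 *	bus_dmamap_load(t, map, buf, buflen, NULL, BUS_DMA_NOWAIT);
 *	... program the device with map->dm_segs[0].ds_addr ...
 *	bus_dmamap_unload(t, map);
 *	bus_dmamap_destroy(t, map);
 */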

void
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	bus_dma_segment_t *segs;
	vaddr_t dva;
	vsize_t sgsize;
	int error, s;

#ifdef DIAGNOSTIC
	if (map->dm_nsegs != 1)
		panic("%s: invalid nsegs = %d", __func__, map->dm_nsegs);
#endif

	segs = map->dm_segs;
	dva = segs[0]._ds_va & ~PGOFSET;
	sgsize = segs[0]._ds_sgsize;

	/* Unmap the DVMA addresses. */
	pmap_remove(pmap_kernel(), dva, dva + sgsize);
	pmap_update(pmap_kernel());

	/* Free the DVMA addresses. */
	s = splvm();
	error = extent_free(dvma_extent, dva, sgsize, EX_NOWAIT);
	splx(s);
#ifdef DIAGNOSTIC
	if (error)
		panic("%s: unable to free DVMA region", __func__);
#endif

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}